Remove the HGST Flash Storage Driver
The HGST Flash Storage Suite driver was marked unsupported in the Rocky release. Since then its third-party CI has not reported for 427 days, which does not meet Cinder's CI requirements, so the driver is being removed.

Change-Id: I91f511d0727da1bad77ee913afe0a0f137d289f8
parent 2a502cf9c3
commit 48fbd291a7
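Operators still using this backend must migrate their volumes to a supported driver before upgrading, since Cinder can no longer load the removed driver class. A sketch of the kind of cinder.conf backend section affected (the section and backend names here are illustrative, not prescribed; the volume_driver path is the class removed by this commit):

    [hgst-1]
    volume_driver = cinder.volume.drivers.hgst.HGSTDriver
    volume_backend_name = hgst-1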
@@ -99,7 +99,6 @@ from cinder.volume.drivers.fujitsu import eternus_dx_common as \
     cinder_volume_drivers_fujitsu_eternusdxcommon
 from cinder.volume.drivers.fusionstorage import dsware as \
     cinder_volume_drivers_fusionstorage_dsware
-from cinder.volume.drivers import hgst as cinder_volume_drivers_hgst
 from cinder.volume.drivers.hpe import hpe_3par_common as \
     cinder_volume_drivers_hpe_hpe3parcommon
 from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \
@@ -293,7 +292,6 @@ def list_opts():
                 cinder_volume_drivers_fujitsu_eternusdxcommon.
                 FJ_ETERNUS_DX_OPT_opts,
                 cinder_volume_drivers_fusionstorage_dsware.volume_opts,
-                cinder_volume_drivers_hgst.hgst_opts,
                 cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
                 cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts,
                 cinder_volume_drivers_huawei_huaweidriver.huawei_opts,
@@ -1,941 +0,0 @@
-# Copyright (c) 2015 HGST Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import mock
-
-from oslo_concurrency import processutils
-from oslo_utils import units
-
-from cinder import context
-from cinder import exception
-from cinder import test
-from cinder.volume import configuration as conf
-from cinder.volume.drivers.hgst import HGSTDriver
-from cinder.volume import volume_types
-
-
-class HGSTTestCase(test.TestCase):
-
-    # Need to mock these since we use them on driver creation
-    @mock.patch('pwd.getpwnam', return_value=1)
-    @mock.patch('grp.getgrnam', return_value=1)
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def setUp(self, mock_ghn, mock_grnam, mock_pwnam):
-        """Set up UUT and all the flags required for later fake_executes."""
-        super(HGSTTestCase, self).setUp()
-        self.mock_object(processutils, 'execute', self._fake_execute)
-        self._fail_vgc_cluster = False
-        self._fail_ip = False
-        self._fail_network_list = False
-        self._fail_domain_list = False
-        self._empty_domain_list = False
-        self._fail_host_storage = False
-        self._fail_space_list = False
-        self._fail_space_delete = False
-        self._fail_set_apphosts = False
-        self._fail_extend = False
-        self._request_cancel = False
-        self._return_blocked = 0
-        self.configuration = mock.Mock(spec=conf.Configuration)
-        self.configuration.safe_get = self._fake_safe_get
-        self._reset_configuration()
-        self.driver = HGSTDriver(configuration=self.configuration,
-                                 execute=self._fake_execute)
-
-    def _fake_safe_get(self, value):
-        """Don't throw exception on missing parameters, return None."""
-        try:
-            val = getattr(self.configuration, value)
-        except AttributeError:
-            val = None
-        return val
-
-    def _reset_configuration(self):
-        """Set safe and sane values for config params."""
-        self.configuration.num_volume_device_scan_tries = 1
-        self.configuration.volume_dd_blocksize = '1M'
-        self.configuration.volume_backend_name = 'hgst-1'
-        self.configuration.hgst_storage_servers = 'stor1:gbd0,stor2:gbd0'
-        self.configuration.hgst_net = 'net1'
-        self.configuration.hgst_redundancy = '0'
-        self.configuration.hgst_space_user = 'kane'
-        self.configuration.hgst_space_group = 'xanadu'
-        self.configuration.hgst_space_mode = '0777'
-
-    def _parse_space_create(self, *cmd):
-        """Eats a vgc-cluster space-create command line to a dict."""
-        self.created = {'storageserver': ''}
-        cmd = list(*cmd)
-        while cmd:
-            param = cmd.pop(0)
-            if param == "-n":
-                self.created['name'] = cmd.pop(0)
-            elif param == "-N":
-                self.created['net'] = cmd.pop(0)
-            elif param == "-s":
-                self.created['size'] = cmd.pop(0)
-            elif param == "--redundancy":
-                self.created['redundancy'] = cmd.pop(0)
elif param == "--user":
|
|
||||||
self.created['user'] = cmd.pop(0)
|
|
||||||
elif param == "--user":
|
|
||||||
self.created['user'] = cmd.pop(0)
|
|
||||||
elif param == "--group":
|
|
||||||
self.created['group'] = cmd.pop(0)
|
|
||||||
elif param == "--mode":
|
|
||||||
self.created['mode'] = cmd.pop(0)
|
|
||||||
elif param == "-S":
|
|
||||||
self.created['storageserver'] += cmd.pop(0) + ","
|
|
||||||
else:
|
|
||||||
pass
|
|
||||||
|
|
||||||
def _parse_space_extend(self, *cmd):
|
|
||||||
"""Eats a vgc-cluster space-extend commandline to a dict."""
|
|
||||||
self.extended = {'storageserver': ''}
|
|
||||||
cmd = list(*cmd)
|
|
||||||
while cmd:
|
|
||||||
param = cmd.pop(0)
|
|
||||||
if param == "-n":
|
|
||||||
self.extended['name'] = cmd.pop(0)
|
|
||||||
elif param == "-s":
|
|
||||||
self.extended['size'] = cmd.pop(0)
|
|
||||||
elif param == "-S":
|
|
||||||
self.extended['storageserver'] += cmd.pop(0) + ","
|
|
||||||
else:
|
|
||||||
pass
|
|
||||||
if self._fail_extend:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
else:
|
|
||||||
return '', ''
|
|
||||||
|
|
||||||
def _parse_space_delete(self, *cmd):
|
|
||||||
"""Eats a vgc-cluster space-delete commandline to a dict."""
|
|
||||||
self.deleted = {}
|
|
||||||
cmd = list(*cmd)
|
|
||||||
while cmd:
|
|
||||||
param = cmd.pop(0)
|
|
||||||
if param == "-n":
|
|
||||||
self.deleted['name'] = cmd.pop(0)
|
|
||||||
else:
|
|
||||||
pass
|
|
||||||
if self._fail_space_delete:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
else:
|
|
||||||
return '', ''
|
|
||||||
|
|
||||||
def _parse_space_list(self, *cmd):
|
|
||||||
"""Eats a vgc-cluster space-list commandline to a dict."""
|
|
||||||
json = False
|
|
||||||
nameOnly = False
|
|
||||||
cmd = list(*cmd)
|
|
||||||
while cmd:
|
|
||||||
param = cmd.pop(0)
|
|
||||||
if param == "--json":
|
|
||||||
json = True
|
|
||||||
elif param == "--name-only":
|
|
||||||
nameOnly = True
|
|
||||||
elif param == "-n":
|
|
||||||
pass # Don't use the name here...
|
|
||||||
else:
|
|
||||||
pass
|
|
||||||
if self._fail_space_list:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
elif nameOnly:
|
|
||||||
return "space1\nspace2\nvolume1\n", ''
|
|
||||||
elif json:
|
|
||||||
return HGST_SPACE_JSON, ''
|
|
||||||
else:
|
|
||||||
return '', ''
|
|
||||||
|
|
||||||
def _parse_network_list(self, *cmd):
|
|
||||||
"""Eat a network-list command and return error or results."""
|
|
||||||
if self._fail_network_list:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
else:
|
|
||||||
return NETWORK_LIST, ''
|
|
||||||
|
|
||||||
def _parse_domain_list(self, *cmd):
|
|
||||||
"""Eat a domain-list command and return error, empty, or results."""
|
|
||||||
if self._fail_domain_list:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
elif self._empty_domain_list:
|
|
||||||
return '', ''
|
|
||||||
else:
|
|
||||||
return "thisserver\nthatserver\nanotherserver\n", ''
|
|
||||||
|
|
||||||
def _fake_execute(self, *cmd, **kwargs):
|
|
||||||
"""Sudo hook to catch commands to allow running on all hosts."""
|
|
||||||
cmdlist = list(cmd)
|
|
||||||
exe = cmdlist.pop(0)
|
|
||||||
if exe == 'vgc-cluster':
|
|
||||||
exe = cmdlist.pop(0)
|
|
||||||
if exe == "request-cancel":
|
|
||||||
self._request_cancel = True
|
|
||||||
if self._return_blocked > 0:
|
|
||||||
return 'Request cancelled', ''
|
|
||||||
else:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
elif self._fail_vgc_cluster:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
elif exe == "--version":
|
|
||||||
return "HGST Solutions V2.5.0.0.x.x.x.x.x", ''
|
|
||||||
elif exe == "space-list":
|
|
||||||
return self._parse_space_list(cmdlist)
|
|
||||||
elif exe == "space-create":
|
|
||||||
self._parse_space_create(cmdlist)
|
|
||||||
if self._return_blocked > 0:
|
|
||||||
self._return_blocked = self._return_blocked - 1
|
|
||||||
out = "VGC_CREATE_000002\nBLOCKED\n"
|
|
||||||
raise processutils.ProcessExecutionError(stdout=out,
|
|
||||||
exit_code=1)
|
|
||||||
return '', ''
|
|
||||||
elif exe == "space-delete":
|
|
||||||
return self._parse_space_delete(cmdlist)
|
|
||||||
elif exe == "space-extend":
|
|
||||||
return self._parse_space_extend(cmdlist)
|
|
||||||
elif exe == "host-storage":
|
|
||||||
if self._fail_host_storage:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
return HGST_HOST_STORAGE, ''
|
|
||||||
elif exe == "domain-list":
|
|
||||||
return self._parse_domain_list()
|
|
||||||
elif exe == "network-list":
|
|
||||||
return self._parse_network_list()
|
|
||||||
elif exe == "space-set-apphosts":
|
|
||||||
if self._fail_set_apphosts:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
return '', ''
|
|
||||||
else:
|
|
||||||
raise NotImplementedError
|
|
||||||
elif exe == 'ip':
|
|
||||||
if self._fail_ip:
|
|
||||||
raise processutils.ProcessExecutionError(exit_code=1)
|
|
||||||
else:
|
|
||||||
return IP_OUTPUT, ''
|
|
||||||
elif exe == 'dd':
|
|
||||||
self.dd_count = -1
|
|
||||||
for p in cmdlist:
|
|
||||||
if 'count=' in p:
|
|
||||||
self.dd_count = int(p[6:])
|
|
||||||
elif 'bs=' in p:
|
|
||||||
self.bs = p[3:]
|
|
||||||
return DD_OUTPUT, ''
|
|
||||||
else:
|
|
||||||
return '', ''
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_vgc_cluster_not_present(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when vgc-cluster returns an error."""
|
|
||||||
# Should pass
|
|
||||||
self._fail_vgc_cluster = False
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Should throw exception
|
|
||||||
self._fail_vgc_cluster = True
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_parameter_redundancy_invalid(self, mock_ghn, mock_grnam,
|
|
||||||
mock_pwnam):
|
|
||||||
"""Test when hgst_redundancy config parameter not 0 or 1."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Should throw exceptions
|
|
||||||
self.configuration.hgst_redundancy = ''
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
self.configuration.hgst_redundancy = 'Fred'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when hgst_space_user doesn't map to UNIX user."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Should throw exceptions
|
|
||||||
mock_pwnam.side_effect = KeyError()
|
|
||||||
self.configuration.hgst_space_user = ''
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
self.configuration.hgst_space_user = 'Fred!`'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_parameter_group_invalid(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when hgst_space_group doesn't map to UNIX group."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Should throw exceptions
|
|
||||||
mock_grnam.side_effect = KeyError()
|
|
||||||
self.configuration.hgst_space_group = ''
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
self.configuration.hgst_space_group = 'Fred!`'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when mode for created spaces isn't proper format."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Should throw exceptions
|
|
||||||
self.configuration.hgst_space_mode = ''
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
self.configuration.hgst_space_mode = 'Fred'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when hgst_net not in the domain."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Should throw exceptions
|
|
||||||
self._fail_network_list = True
|
|
||||||
self.configuration.hgst_net = 'Fred'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
self._fail_network_list = False
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when IP ADDR command fails."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Throw exception, need to clear internal cached host in driver
|
|
||||||
self._fail_ip = True
|
|
||||||
self.driver._vgc_host = None
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_domain_list_fails(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when domain-list fails for the domain."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Throw exception, need to clear internal cached host in driver
|
|
||||||
self._fail_domain_list = True
|
|
||||||
self.driver._vgc_host = None
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_not_in_domain(self, mock_ghn, mock_grnam, mock_pwnam):
|
|
||||||
"""Test exception when Cinder host not domain member."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Throw exception, need to clear internal cached host in driver
|
|
||||||
self._empty_domain_list = True
|
|
||||||
self.driver._vgc_host = None
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
@mock.patch('pwd.getpwnam', return_value=1)
|
|
||||||
@mock.patch('grp.getgrnam', return_value=1)
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_parameter_storageservers_invalid(self, mock_ghn, mock_grnam,
|
|
||||||
mock_pwnam):
|
|
||||||
"""Test exception when the storage servers are invalid/missing."""
|
|
||||||
# Should pass
|
|
||||||
self.driver.check_for_setup_error()
|
|
||||||
# Storage_hosts missing
|
|
||||||
self.configuration.hgst_storage_servers = ''
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
# missing a : between host and devnode
|
|
||||||
self.configuration.hgst_storage_servers = 'stor1,stor2'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
# missing a : between host and devnode
|
|
||||||
self.configuration.hgst_storage_servers = 'stor1:gbd0,stor2'
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
# Host not in cluster
|
|
||||||
self.configuration.hgst_storage_servers = 'stor1:gbd0'
|
|
||||||
self._fail_host_storage = True
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.check_for_setup_error)
|
|
||||||
|
|
||||||
def test_update_volume_stats(self):
|
|
||||||
"""Get cluster space available, should pass."""
|
|
||||||
actual = self.driver.get_volume_stats(True)
|
|
||||||
self.assertEqual('HGST', actual['vendor_name'])
|
|
||||||
self.assertEqual('hgst', actual['storage_protocol'])
|
|
||||||
self.assertEqual(90, actual['total_capacity_gb'])
|
|
||||||
self.assertEqual(87, actual['free_capacity_gb'])
|
|
||||||
self.assertEqual(0, actual['reserved_percentage'])
|
|
||||||
|
|
||||||
def test_update_volume_stats_redundancy(self):
|
|
||||||
"""Get cluster space available, half-sized - 1 for mirrors."""
|
|
||||||
self.configuration.hgst_redundancy = '1'
|
|
||||||
actual = self.driver.get_volume_stats(True)
|
|
||||||
self.assertEqual('HGST', actual['vendor_name'])
|
|
||||||
self.assertEqual('hgst', actual['storage_protocol'])
|
|
||||||
self.assertEqual(44, actual['total_capacity_gb'])
|
|
||||||
self.assertEqual(43, actual['free_capacity_gb'])
|
|
||||||
self.assertEqual(0, actual['reserved_percentage'])
|
|
||||||
|
|
||||||
def test_update_volume_stats_cached(self):
|
|
||||||
"""Get cached cluster space, should not call executable."""
|
|
||||||
self._fail_host_storage = True
|
|
||||||
actual = self.driver.get_volume_stats(False)
|
|
||||||
self.assertEqual('HGST', actual['vendor_name'])
|
|
||||||
self.assertEqual('hgst', actual['storage_protocol'])
|
|
||||||
self.assertEqual(90, actual['total_capacity_gb'])
|
|
||||||
self.assertEqual(87, actual['free_capacity_gb'])
|
|
||||||
self.assertEqual(0, actual['reserved_percentage'])
|
|
||||||
|
|
||||||
def test_update_volume_stats_error(self):
|
|
||||||
"""Test that when host-storage gives an error, return unknown."""
|
|
||||||
self._fail_host_storage = True
|
|
||||||
actual = self.driver.get_volume_stats(True)
|
|
||||||
self.assertEqual('HGST', actual['vendor_name'])
|
|
||||||
self.assertEqual('hgst', actual['storage_protocol'])
|
|
||||||
self.assertEqual('unknown', actual['total_capacity_gb'])
|
|
||||||
self.assertEqual('unknown', actual['free_capacity_gb'])
|
|
||||||
self.assertEqual(0, actual['reserved_percentage'])
|
|
||||||
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_create_volume(self, mock_ghn):
|
|
||||||
"""Test volume creation, ensure appropriate size expansion/name."""
|
|
||||||
ctxt = context.get_admin_context()
|
|
||||||
extra_specs = {}
|
|
||||||
type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
|
|
||||||
volume = {'id': '1', 'name': 'volume1',
|
|
||||||
'display_name': '',
|
|
||||||
'volume_type_id': type_ref['id'],
|
|
||||||
'size': 10}
|
|
||||||
ret = self.driver.create_volume(volume)
|
|
||||||
expected = {'redundancy': '0', 'group': 'xanadu',
|
|
||||||
'name': 'volume10', 'mode': '0777',
|
|
||||||
'user': 'kane', 'net': 'net1',
|
|
||||||
'storageserver': 'stor1:gbd0,stor2:gbd0,',
|
|
||||||
'size': '12'}
|
|
||||||
self.assertDictEqual(expected, self.created)
|
|
||||||
# Check the returned provider, note that provider_id is hashed
|
|
||||||
expected_pid = {'provider_id': 'volume10'}
|
|
||||||
self.assertDictEqual(expected_pid, ret)
|
|
||||||
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_create_volume_name_creation_fail(self, mock_ghn):
|
|
||||||
"""Test volume creation exception when can't make a hashed name."""
|
|
||||||
ctxt = context.get_admin_context()
|
|
||||||
extra_specs = {}
|
|
||||||
type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
|
|
||||||
volume = {'id': '1', 'name': 'volume1',
|
|
||||||
'display_name': '',
|
|
||||||
'volume_type_id': type_ref['id'],
|
|
||||||
'size': 10}
|
|
||||||
self._fail_space_list = True
|
|
||||||
self.assertRaises(exception.VolumeDriverException,
|
|
||||||
self.driver.create_volume, volume)
|
|
||||||
|
|
||||||
@mock.patch('socket.gethostbyname', return_value='123.123.123.123')
|
|
||||||
def test_create_snapshot(self, mock_ghn):
|
|
||||||
"""Test creating a snapshot, ensure full data of original copied."""
|
|
||||||
# Now snapshot the volume and check commands
|
|
||||||
snapshot = {'volume_name': 'volume10',
|
|
||||||
'volume_id': 'xxx', 'display_name': 'snap10',
|
|
||||||
'name': '123abc', 'volume_size': 10, 'id': '123abc',
|
|
||||||
'volume': {'provider_id': 'space10'}}
|
|
||||||
ret = self.driver.create_snapshot(snapshot)
|
|
||||||
-        # We must copy entire underlying storage, ~12GB, not just 10GB
-        self.assertEqual(11444 * units.Mi, self.dd_count)
-        self.assertEqual('1M', self.bs)
-        # Check space-create command
-        expected = {'redundancy': '0', 'group': 'xanadu',
-                    'name': snapshot['display_name'], 'mode': '0777',
-                    'user': 'kane', 'net': 'net1',
-                    'storageserver': 'stor1:gbd0,stor2:gbd0,',
-                    'size': '12'}
-        self.assertDictEqual(expected, self.created)
-        # Check the returned provider
-        expected_pid = {'provider_id': 'snap10'}
-        self.assertDictEqual(expected_pid, ret)
-
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def test_create_cloned_volume(self, mock_ghn):
-        """Test creating a clone, ensure full size is copied from original."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        orig = {'id': '1', 'name': 'volume1', 'display_name': '',
-                'volume_type_id': type_ref['id'], 'size': 10,
-                'provider_id': 'space_orig'}
-        clone = {'id': '2', 'name': 'clone1', 'display_name': '',
-                 'volume_type_id': type_ref['id'], 'size': 10}
-        pid = self.driver.create_cloned_volume(clone, orig)
-        # We must copy entire underlying storage, ~12GB, not just 10GB
-        self.assertEqual(11444 * units.Mi, self.dd_count)
-        self.assertEqual('1M', self.bs)
-        # Check space-create command
-        expected = {'redundancy': '0', 'group': 'xanadu',
-                    'name': 'clone1', 'mode': '0777',
-                    'user': 'kane', 'net': 'net1',
-                    'storageserver': 'stor1:gbd0,stor2:gbd0,',
-                    'size': '12'}
-        self.assertDictEqual(expected, self.created)
-        # Check the returned provider
-        expected_pid = {'provider_id': 'clone1'}
-        self.assertDictEqual(expected_pid, pid)
-
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def test_add_cinder_apphosts_fails(self, mock_ghn):
-        """Test exception when set-apphost can't connect volume to host."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        orig = {'id': '1', 'name': 'volume1', 'display_name': '',
-                'volume_type_id': type_ref['id'], 'size': 10,
-                'provider_id': 'space_orig'}
-        clone = {'id': '2', 'name': 'clone1', 'display_name': '',
-                 'volume_type_id': type_ref['id'], 'size': 10}
-        self._fail_set_apphosts = True
-        self.assertRaises(exception.VolumeDriverException,
-                          self.driver.create_cloned_volume, clone, orig)
-
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def test_create_volume_from_snapshot(self, mock_ghn):
-        """Test creating volume from snapshot, ensure full space copy."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        snap = {'id': '1', 'name': 'volume1', 'display_name': '',
-                'volume_type_id': type_ref['id'], 'size': 10,
-                'provider_id': 'space_orig'}
-        volume = {'id': '2', 'name': 'volume2', 'display_name': '',
-                  'volume_type_id': type_ref['id'], 'size': 10}
-        pid = self.driver.create_volume_from_snapshot(volume, snap)
-        # We must copy entire underlying storage, ~12GB, not just 10GB
-        self.assertEqual(11444 * units.Mi, self.dd_count)
-        self.assertEqual('1M', self.bs)
-        # Check space-create command
-        expected = {'redundancy': '0', 'group': 'xanadu',
-                    'name': 'volume2', 'mode': '0777',
-                    'user': 'kane', 'net': 'net1',
-                    'storageserver': 'stor1:gbd0,stor2:gbd0,',
-                    'size': '12'}
-        self.assertDictEqual(expected, self.created)
-        # Check the returned provider
-        expected_pid = {'provider_id': 'volume2'}
-        self.assertDictEqual(expected_pid, pid)
-
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def test_create_volume_blocked(self, mock_ghn):
-        """Test volume creation where only initial space-create is blocked.
-
-        This should actually pass because we are blocked but return an error
-        in request-cancel, meaning that it got unblocked before we could kill
-        the space request.
-        """
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10}
-        self._return_blocked = 1  # Block & fail cancel => create succeeded
-        ret = self.driver.create_volume(volume)
-        expected = {'redundancy': '0', 'group': 'xanadu',
-                    'name': 'volume10', 'mode': '0777',
-                    'user': 'kane', 'net': 'net1',
-                    'storageserver': 'stor1:gbd0,stor2:gbd0,',
-                    'size': '12'}
-        self.assertDictEqual(expected, self.created)
-        # Check the returned provider
-        expected_pid = {'provider_id': 'volume10'}
-        self.assertDictEqual(expected_pid, ret)
-        self.assertTrue(self._request_cancel)
-
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def test_create_volume_blocked_and_fail(self, mock_ghn):
-        """Test volume creation where space-create blocked permanently.
-
-        This should fail because the initial create was blocked and the
-        request-cancel succeeded, meaning the create operation never
-        completed.
-        """
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10}
-        self._return_blocked = 2  # Block & pass cancel => create failed. :(
-        self.assertRaises(exception.VolumeDriverException,
-                          self.driver.create_volume, volume)
-        self.assertTrue(self._request_cancel)
-
-    def test_delete_volume(self):
-        """Test deleting existing volume, ensure proper name used."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10,
-                  'provider_id': 'volume10'}
-        self.driver.delete_volume(volume)
-        expected = {'name': 'volume10'}
-        self.assertDictEqual(expected, self.deleted)
-
-    def test_delete_volume_failure_modes(self):
-        """Test cases where space-delete fails, but OS delete is still OK."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10,
-                  'provider_id': 'volume10'}
-        self._fail_space_delete = True
-        # This should not throw an exception, space-delete failure not problem
-        self.driver.delete_volume(volume)
-        self._fail_space_delete = False
-        volume['provider_id'] = None
-        # This should also not throw an exception
-        self.driver.delete_volume(volume)
-
-    def test_delete_snapshot(self):
-        """Test deleting a snapshot, ensure proper name is removed."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        snapshot = {'id': '1', 'name': 'volume1',
-                    'display_name': '',
-                    'volume_type_id': type_ref['id'],
-                    'size': 10,
-                    'provider_id': 'snap10'}
-        self.driver.delete_snapshot(snapshot)
-        expected = {'name': 'snap10'}
-        self.assertDictEqual(expected, self.deleted)
-
-    def test_extend_volume(self):
-        """Test extending a volume, check the size in GB vs. GiB."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10,
-                  'provider_id': 'volume10'}
-        self.extended = {'name': '', 'size': '0',
-                         'storageserver': ''}
-        self.driver.extend_volume(volume, 12)
-        expected = {'name': 'volume10', 'size': '2',
-                    'storageserver': 'stor1:gbd0,stor2:gbd0,'}
-        self.assertDictEqual(expected, self.extended)
-
-    def test_extend_volume_noextend(self):
-        """Test extending a volume where Space does not need to be enlarged.
-
-        Because Spaces are generated somewhat larger than the requested size
-        from OpenStack due to the base10(HGST)/base2(OS) mismatch, they can
-        sometimes be larger than requested from OS. In that case a
-        volume_extend may actually be a noop since the volume is already large
-        enough to satisfy OS's request.
-        """
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10,
-                  'provider_id': 'volume10'}
-        self.extended = {'name': '', 'size': '0',
-                         'storageserver': ''}
-        self.driver.extend_volume(volume, 10)
-        expected = {'name': '', 'size': '0',
-                    'storageserver': ''}
-        self.assertDictEqual(expected, self.extended)
-
-    def test_space_list_fails(self):
-        """Test exception is thrown when we can't call space-list."""
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10,
-                  'provider_id': 'volume10'}
-        self.extended = {'name': '', 'size': '0',
-                         'storageserver': ''}
-        self._fail_space_list = True
-        self.assertRaises(exception.VolumeDriverException,
-                          self.driver.extend_volume, volume, 12)
-
-    def test_cli_error_not_blocked(self):
"""Test the _blocked handler's handlinf of a non-blocked error.
|
|
||||||
-
-        The _handle_blocked handler is called on any process errors in the
-        code. If the error was not caused by a blocked command condition
-        (syntax error, out of space, etc.) then it should just throw the
-        exception and not try and retry the command.
-        """
-        ctxt = context.get_admin_context()
-        extra_specs = {}
-        type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
-        volume = {'id': '1', 'name': 'volume1',
-                  'display_name': '',
-                  'volume_type_id': type_ref['id'],
-                  'size': 10,
-                  'provider_id': 'volume10'}
-        self.extended = {'name': '', 'size': '0',
-                         'storageserver': ''}
-        self._fail_extend = True
-        self.assertRaises(exception.VolumeDriverException,
-                          self.driver.extend_volume, volume, 12)
-        self.assertFalse(self._request_cancel)
-
-    @mock.patch('socket.gethostbyname', return_value='123.123.123.123')
-    def test_initialize_connection(self, mock_ghn):
"""Test that the connection_info for Nova makes sense."""
|
|
||||||
volume = {'name': '123', 'provider_id': 'spacey'}
|
|
||||||
conn = self.driver.initialize_connection(volume, None)
|
|
||||||
expected = {'name': 'spacey', 'noremovehost': 'thisserver'}
|
|
||||||
self.assertDictEqual(expected, conn['data'])
|
|
||||||
|
|
||||||
# Below are some command outputs we emulate
|
|
||||||
IP_OUTPUT = """
|
|
||||||
3: em2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state
|
|
||||||
link/ether 00:25:90:d9:18:09 brd ff:ff:ff:ff:ff:ff
|
|
||||||
inet 192.168.0.23/24 brd 192.168.0.255 scope global em2
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
inet6 fe80::225:90ff:fed9:1809/64 scope link
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
|
|
||||||
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
|
|
||||||
inet 123.123.123.123/8 scope host lo
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
inet 169.254.169.254/32 scope link lo
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
inet6 ::1/128 scope host
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
2: em1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master
|
|
||||||
link/ether 00:25:90:d9:18:08 brd ff:ff:ff:ff:ff:ff
|
|
||||||
inet6 fe80::225:90ff:fed9:1808/64 scope link
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
"""
|
|
||||||
|
|
||||||
HGST_HOST_STORAGE = """
|
|
||||||
{
|
|
||||||
"hostStatus": [
|
|
||||||
{
|
|
||||||
"node": "tm33.virident.info",
|
|
||||||
"up": true,
|
|
||||||
"isManager": true,
|
|
||||||
"cardStatus": [
|
|
||||||
{
|
|
||||||
"cardName": "/dev/sda3",
|
|
||||||
"cardSerialNumber": "002f09b4037a9d521c007ee4esda3",
|
|
||||||
"cardStatus": "Good",
|
|
||||||
"cardStateDetails": "Normal",
|
|
||||||
"cardActionRequired": "",
|
|
||||||
"cardTemperatureC": 0,
|
|
||||||
"deviceType": "Generic",
|
|
||||||
"cardTemperatureState": "Safe",
|
|
||||||
"partitionStatus": [
|
|
||||||
{
|
|
||||||
"partName": "/dev/gbd0",
|
|
||||||
"partitionState": "READY",
|
|
||||||
"usableCapacityBytes": 98213822464,
|
|
||||||
"totalReadBytes": 0,
|
|
||||||
"totalWriteBytes": 0,
|
|
||||||
"remainingLifePCT": 100,
|
|
||||||
"flashReservesLeftPCT": 100,
|
|
||||||
"fmc": true,
|
|
||||||
"vspaceCapacityAvailable": 94947041280,
|
|
||||||
"vspaceReducedCapacityAvailable": 87194279936,
|
|
||||||
"_partitionID": "002f09b4037a9d521c007ee4esda3:0",
|
|
||||||
"_usedSpaceBytes": 3266781184,
|
|
||||||
"_enabledSpaceBytes": 3266781184,
|
|
||||||
"_disabledSpaceBytes": 0
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"driverStatus": {
|
|
||||||
"vgcdriveDriverLoaded": true,
|
|
||||||
"vhaDriverLoaded": true,
|
|
||||||
"vcacheDriverLoaded": true,
|
|
||||||
"vlvmDriverLoaded": true,
|
|
||||||
"ipDataProviderLoaded": true,
|
|
||||||
"ibDataProviderLoaded": false,
|
|
||||||
"driverUptimeSecs": 4800,
|
|
||||||
"rVersion": "20368.d55ec22.master"
|
|
||||||
},
|
|
||||||
"totalCapacityBytes": 98213822464,
|
|
||||||
"totalUsedBytes": 3266781184,
|
|
||||||
"totalEnabledBytes": 3266781184,
|
|
||||||
"totalDisabledBytes": 0
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"node": "tm32.virident.info",
|
|
||||||
"up": true,
|
|
||||||
"isManager": false,
|
|
||||||
"cardStatus": [],
|
|
||||||
"driverStatus": {
|
|
||||||
"vgcdriveDriverLoaded": true,
|
|
||||||
"vhaDriverLoaded": true,
|
|
||||||
"vcacheDriverLoaded": true,
|
|
||||||
"vlvmDriverLoaded": true,
|
|
||||||
"ipDataProviderLoaded": true,
|
|
||||||
"ibDataProviderLoaded": false,
|
|
||||||
"driverUptimeSecs": 0,
|
|
||||||
"rVersion": "20368.d55ec22.master"
|
|
||||||
},
|
|
||||||
"totalCapacityBytes": 0,
|
|
||||||
"totalUsedBytes": 0,
|
|
||||||
"totalEnabledBytes": 0,
|
|
||||||
"totalDisabledBytes": 0
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"totalCapacityBytes": 98213822464,
|
|
||||||
"totalUsedBytes": 3266781184,
|
|
||||||
"totalEnabledBytes": 3266781184,
|
|
||||||
"totalDisabledBytes": 0
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
|
|
||||||
HGST_SPACE_JSON = """
|
|
||||||
{
|
|
||||||
"resources": [
|
|
||||||
{
|
|
||||||
"resourceType": "vLVM-L",
|
|
||||||
"resourceID": "vLVM-L:698cdb43-54da-863e-1699-294a080ce4db",
|
|
||||||
"state": "OFFLINE",
|
|
||||||
"instanceStates": {},
|
|
||||||
"redundancy": 0,
|
|
||||||
"sizeBytes": 12000000000,
|
|
||||||
"name": "volume10",
|
|
||||||
"nodes": [],
|
|
||||||
"networks": [
|
|
||||||
"net1"
|
|
||||||
],
|
|
||||||
"components": [
|
|
||||||
{
|
|
||||||
"resourceType": "vLVM-S",
|
|
||||||
"resourceID": "vLVM-S:698cdb43-54da-863e-eb10-6275f47b8ed2",
|
|
||||||
"redundancy": 0,
|
|
||||||
"order": 0,
|
|
||||||
"sizeBytes": 12000000000,
|
|
||||||
"numStripes": 1,
|
|
||||||
"stripeSizeBytes": null,
|
|
||||||
"name": "volume10s00",
|
|
||||||
"state": "OFFLINE",
|
|
||||||
"instanceStates": {},
|
|
||||||
"components": [
|
|
||||||
{
|
|
||||||
"name": "volume10h00",
|
|
||||||
"resourceType": "vHA",
|
|
||||||
"resourceID": "vHA:3e86da54-40db-8c69-0300-0000ac10476e",
|
|
||||||
"redundancy": 0,
|
|
||||||
"sizeBytes": 12000000000,
|
|
||||||
"state": "GOOD",
|
|
||||||
"components": [
|
|
||||||
{
|
|
||||||
"name": "volume10h00",
|
|
||||||
"vspaceType": "vHA",
|
|
||||||
"vspaceRole": "primary",
|
|
||||||
"storageObjectID": "vHA:3e86da54-40db-8c69--18130019e486",
|
|
||||||
"state": "Disconnected (DCS)",
|
|
||||||
"node": "tm33.virident.info",
|
|
||||||
"partName": "/dev/gbd0"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"crState": "GOOD"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "volume10v00",
|
|
||||||
"resourceType": "vShare",
|
|
||||||
"resourceID": "vShare:3f86da54-41db-8c69-0300-ecf4bbcc14cc",
|
|
||||||
"redundancy": 0,
|
|
||||||
"order": 0,
|
|
||||||
"sizeBytes": 12000000000,
|
|
||||||
"state": "GOOD",
|
|
||||||
"components": [
|
|
||||||
{
|
|
||||||
"name": "volume10v00",
|
|
||||||
"vspaceType": "vShare",
|
|
||||||
"vspaceRole": "target",
|
|
||||||
"storageObjectID": "vShare:3f86da54-41db-8c64bbcc14cc:T",
|
|
||||||
"state": "Started",
|
|
||||||
"node": "tm33.virident.info",
|
|
||||||
"partName": "/dev/gbd0_volume10h00"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"_size": "12GB",
|
|
||||||
"_state": "OFFLINE",
|
|
||||||
"_ugm": "",
|
|
||||||
"_nets": "net1",
|
|
||||||
"_hosts": "tm33.virident.info(12GB,NC)",
|
|
||||||
"_ahosts": "",
|
|
||||||
"_shosts": "tm33.virident.info(12GB)",
|
|
||||||
"_name": "volume10",
|
|
||||||
"_node": "",
|
|
||||||
"_type": "vLVM-L",
|
|
||||||
"_detail": "vLVM-L:698cdb43-54da-863e-1699-294a080ce4db",
|
|
||||||
"_device": ""
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
|
|
||||||
NETWORK_LIST = """
|
|
||||||
Network Name Type Flags Description
|
|
||||||
------------ ---- ---------- ------------------------
|
|
||||||
net1 IPv4 autoConfig 192.168.0.0/24 1Gb/s
|
|
||||||
net2 IPv4 autoConfig 192.168.10.0/24 10Gb/s
|
|
||||||
"""
|
|
||||||
|
|
||||||
DD_OUTPUT = """
|
|
||||||
1+0 records in
|
|
||||||
1+0 records out
|
|
||||||
1024 bytes (1.0 kB) copied, 0.000427529 s, 2.4 MB/s
|
|
||||||
"""
|
|
@@ -1,610 +0,0 @@
-# Copyright 2015 HGST
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Desc    : Driver to store Cinder volumes using HGST Flash Storage Suite
-Require : HGST Flash Storage Suite
-Author  : Earle F. Philhower, III <earle.philhower.iii@hgst.com>
-"""
-
-import grp
-import json
-import math
-import os
-import pwd
-import six
-import socket
-import string
-
-from oslo_concurrency import lockutils
-from oslo_concurrency import processutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import units
-
-from cinder import exception
-from cinder.i18n import _
-from cinder.image import image_utils
-from cinder import interface
-from cinder.volume import configuration
-from cinder.volume import driver
-from cinder.volume import utils as volutils
-
-LOG = logging.getLogger(__name__)
-
-hgst_opts = [
-    cfg.StrOpt('hgst_net',
-               default='Net 1 (IPv4)',
-               help='Space network name to use for data transfer'),
-    cfg.StrOpt('hgst_storage_servers',
-               default='os:gbd0',
-               help='Comma separated list of Space storage servers:devices. '
-                    'ex: os1_stor:gbd0,os2_stor:gbd0'),
-    cfg.StrOpt('hgst_redundancy',
-               default='0',
-               help='Should spaces be redundantly stored (1/0)'),
-    cfg.StrOpt('hgst_space_user',
-               default='root',
-               help='User to own created spaces'),
-    cfg.StrOpt('hgst_space_group',
-               default='disk',
-               help='Group to own created spaces'),
-    cfg.StrOpt('hgst_space_mode',
-               default='0600',
-               help='UNIX mode for created spaces'),
-]
-
-
-CONF = cfg.CONF
-CONF.register_opts(hgst_opts, group=configuration.SHARED_CONF_GROUP)
-
-
-@interface.volumedriver
-class HGSTDriver(driver.VolumeDriver):
-    """This is the Class to set in cinder.conf (volume_driver).
-
-    Implements a Cinder Volume driver which creates a HGST Space for each
-    Cinder Volume or Snapshot requested. Use the vgc-cluster CLI to do
-    all management operations.
-
-    The Cinder host will nominally have all Spaces made visible to it,
-    while individual compute nodes will only have Spaces connected to KVM
-    instances connected.
-    """
-
-    VERSION = '1.0.0'
-
-    # ThirdPartySystems wiki page
-    CI_WIKI_NAME = "HGST_Solutions_CI"
-
-    VGCCLUSTER = 'vgc-cluster'
-    SPACEGB = units.G - 16 * units.M  # Workaround for shrinkage Bug 28320
-    BLOCKED = "BLOCKED"  # Exit code when a command is blocked
-
-    # TODO(jsbryant) Remove driver in Stein if CI is not fixed
-    SUPPORTED = False
-
-    def __init__(self, *args, **kwargs):
-        """Initialize our protocol descriptor/etc."""
-        super(HGSTDriver, self).__init__(*args, **kwargs)
-        self.configuration.append_config_values(hgst_opts)
-        self._vgc_host = None
-        self.check_for_setup_error()
-        self._stats = {'driver_version': self.VERSION,
-                       'reserved_percentage': 0,
-                       'storage_protocol': 'hgst',
-                       'total_capacity_gb': 'unknown',
-                       'free_capacity_gb': 'unknown',
-                       'vendor_name': 'HGST',
-                       }
-        backend_name = self.configuration.safe_get('volume_backend_name')
-        self._stats['volume_backend_name'] = backend_name or 'hgst'
-        self.update_volume_stats()
-
-    def _log_cli_err(self, err):
-        """Dumps the full command output to a logfile in error cases."""
-        LOG.error("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\n"
-                  "err: %(stderr)s",
-                  {'cmd': err.cmd, 'code': err.exit_code,
-                   'stdout': err.stdout, 'stderr': err.stderr})
-
-    def _find_vgc_host(self):
-        """Finds vgc-cluster hostname for this box."""
-        params = [self.VGCCLUSTER, "domain-list", "-1"]
-        try:
-            out, unused = self._execute(*params, run_as_root=True)
-        except processutils.ProcessExecutionError as err:
-            self._log_cli_err(err)
-            msg = _("Unable to get list of domain members, check that "
-                    "the cluster is running.")
-            raise exception.VolumeDriverException(message=msg)
-        domain = out.splitlines()
-        params = ["ip", "addr", "list"]
-        try:
-            out, unused = self._execute(*params, run_as_root=False)
-        except processutils.ProcessExecutionError as err:
-            self._log_cli_err(err)
-            msg = _("Unable to get list of IP addresses on this host, "
-                    "check permissions and networking.")
-            raise exception.VolumeDriverException(message=msg)
-        nets = out.splitlines()
-        for host in domain:
-            try:
-                ip = socket.gethostbyname(host)
-                for l in nets:
-                    x = l.strip()
-                    if x.startswith("inet %s/" % ip):
-                        return host
-            except socket.error:
-                pass
-        msg = _("Current host isn't part of HGST domain.")
-        raise exception.VolumeDriverException(message=msg)
-
-    def _hostname(self):
-        """Returns hostname to use for cluster operations on this box."""
-        if self._vgc_host is None:
-            self._vgc_host = self._find_vgc_host()
-        return self._vgc_host
-
-    def _make_server_list(self):
-        """Converts a comma list into params for use by HGST CLI."""
-        csv = self.configuration.safe_get('hgst_storage_servers')
-        servers = csv.split(",")
-        params = []
-        for server in servers:
-            params.append('-S')
-            params.append(six.text_type(server))
-        return params
-
-    def _make_space_name(self, name):
-        """Generates the hashed name for the space from the name.
-
-        This must be called in a locked context as there are race conditions
-        where 2 contexts could both pick what they think is an unallocated
-        space name, and fail later on due to that conflict.
-        """
-        # Sanitize the name string
-        valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
-        name = ''.join(c for c in name if c in valid_chars)
-        name = name.strip(".")  # Remove any leading .s from evil users
-        name = name or "space"  # In case of all illegal chars, safe default
-        # Start out with just the name, truncated to 14 characters
-        outname = name[0:13]
-        # See what names already defined
-        params = [self.VGCCLUSTER, "space-list", "--name-only"]
-        try:
-            out, unused = self._execute(*params, run_as_root=True)
-        except processutils.ProcessExecutionError as err:
-            self._log_cli_err(err)
-            msg = _("Unable to get list of spaces to make new name. Please "
-                    "verify the cluster is running.")
-            raise exception.VolumeDriverException(message=msg)
-        names = out.splitlines()
-        # And anything in /dev/* is also illegal
-        names += os.listdir("/dev")  # Do it the Python way!
-        names += ['.', '..']  # Not included above
-        # While there's a conflict, add incrementing digits until it passes
-        itr = 0
-        while outname in names:
-            itrstr = six.text_type(itr)
-            outname = outname[0:13 - len(itrstr)] + itrstr
-            itr += 1
-        return outname
-
-    def _get_space_size_redundancy(self, space_name):
-        """Parse space output to get allocated size and redundancy."""
-        params = [self.VGCCLUSTER, "space-list", "-n", space_name, "--json"]
-        try:
-            out, unused = self._execute(*params, run_as_root=True)
-        except processutils.ProcessExecutionError as err:
-            self._log_cli_err(err)
-            msg = _("Unable to get information on space %(space)s, please "
-                    "verify that the cluster is running and "
-                    "connected.") % {'space': space_name}
-            raise exception.VolumeDriverException(message=msg)
-        ret = json.loads(out)
-        retval = {}
-        retval['redundancy'] = int(ret['resources'][0]['redundancy'])
-        retval['sizeBytes'] = int(ret['resources'][0]['sizeBytes'])
-        return retval
-
-    def _adjust_size_g(self, size_g):
-        """Adjust space size to next legal value because of redundancy."""
-        # Extending requires expanding to a multiple of the # of
-        # storage hosts in the cluster
-        count = len(self._make_server_list()) // 2  # Remove -s from count
-        if size_g % count:
-            size_g = int(size_g + count)
-            size_g -= size_g % count
-        return int(math.ceil(size_g))
-
-    def do_setup(self, context):
-        pass
-
-    def _get_space_name(self, volume):
-        """Pull name of /dev/<space> from the provider_id."""
-        try:
-            return volume.get('provider_id')
-        except Exception:
-            return ''  # Some error during create, may be able to continue
-
-    def _handle_blocked(self, err, msg):
-        """Safely handle a return code of BLOCKED from a cluster command.
-
-        Handle the case where a command is in BLOCKED state by trying to
-        cancel it. If the cancel fails, then the command actually did
-        complete. If the cancel succeeds, then throw the original error
-        back up the stack.
-        """
-        if (err.stdout is not None) and (self.BLOCKED in err.stdout):
-            # Command is queued but did not complete in X seconds, so
-            # we will cancel it to keep things sane.
-            request = err.stdout.split('\n', 1)[0].strip()
-            params = [self.VGCCLUSTER, 'request-cancel']
-            params += ['-r', six.text_type(request)]
-            throw_err = False
-            try:
-                self._execute(*params, run_as_root=True)
-                # Cancel succeeded, the command was aborted
-                # Send initial exception up the stack
-                LOG.error("VGC-CLUSTER command blocked and cancelled.")
-                # Can't throw it here, the except below would catch it!
-                throw_err = True
-            except Exception:
-                # The cancel failed because the command was just completed.
-                # That means there was no failure, so continue with Cinder op
-                pass
-            if throw_err:
-                self._log_cli_err(err)
-                msg = _("Command %(cmd)s blocked in the CLI and was "
-                        "cancelled") % {'cmd': six.text_type(err.cmd)}
-                raise exception.VolumeDriverException(message=msg)
-        else:
-            # Some other error, just throw it up the chain
-            self._log_cli_err(err)
-            raise exception.VolumeDriverException(message=msg)
-
-    def _add_cinder_apphost(self, spacename):
-        """Add this host to the apphost list of a space."""
-        # Connect to source volume
-        params = [self.VGCCLUSTER, 'space-set-apphosts']
-        params += ['-n', spacename]
-        params += ['-A', self._hostname()]
-        params += ['--action', 'ADD']  # Non-error to add already existing
-        try:
-            self._execute(*params, run_as_root=True)
-        except processutils.ProcessExecutionError as err:
-            msg = _("Unable to add Cinder host to apphosts for space "
-                    "%(space)s") % {'space': spacename}
-            self._handle_blocked(err, msg)
-
-    @lockutils.synchronized('devices', 'cinder-hgst-')
-    def create_volume(self, volume):
-        """API entry to create a volume on the cluster as a HGST space.
-
-        Creates a volume, adjusting for GiB/GB sizing. Locked to ensure we
-        don't have race conditions on the name we pick to use for the space.
-        """
-        # For ease of debugging, use friendly name if it exists
volname = self._make_space_name(volume['display_name']
|
|
||||||
or volume['name'])
|
|
||||||
        volnet = self.configuration.safe_get('hgst_net')
        volbytes = volume['size'] * units.Gi  # OS=Base2, but HGST=Base10
        volsize_gb_cinder = int(math.ceil(float(volbytes) /
                                          float(self.SPACEGB)))
        volsize_g = self._adjust_size_g(volsize_gb_cinder)
        params = [self.VGCCLUSTER, 'space-create']
        params += ['-n', six.text_type(volname)]
        params += ['-N', six.text_type(volnet)]
        params += ['-s', six.text_type(volsize_g)]
        params += ['--redundancy', six.text_type(
            self.configuration.safe_get('hgst_redundancy'))]
        params += ['--user', six.text_type(
            self.configuration.safe_get('hgst_space_user'))]
        params += ['--group', six.text_type(
            self.configuration.safe_get('hgst_space_group'))]
        params += ['--mode', six.text_type(
            self.configuration.safe_get('hgst_space_mode'))]
        params += self._make_server_list()
        params += ['-A', self._hostname()]  # Make it visible only here
        try:
            self._execute(*params, run_as_root=True)
        except processutils.ProcessExecutionError as err:
            msg = _("Error in space-create for %(space)s of size "
                    "%(size)d GB") % {'space': volname,
                                      'size': int(volsize_g)}
            self._handle_blocked(err, msg)
        # Stash away the hashed name
        provider = {}
        provider['provider_id'] = volname
        return provider

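    # Standalone sketch (not from the original driver) of the GiB/GB sizing
    # above, assuming SPACEGB is the base-10 gigabyte (10**9 bytes) and that
    # _make_server_list() yields alternating '-s' flags and hostnames:
    #
    #   import math
    #
    #   GIB = 1024 ** 3       # Cinder sizes volumes in GiB (base 2)
    #   SPACEGB = 1000 ** 3   # HGST sizes spaces in GB (base 10)
    #
    #   def hgst_space_size(size_gib, host_count):
    #       size_gb = int(math.ceil(size_gib * GIB / float(SPACEGB)))
    #       if size_gb % host_count:      # pad to a multiple of the
    #           size_gb += host_count     # storage-host count, then trim
    #           size_gb -= size_gb % host_count
    #       return size_gb
    #
    #   hgst_space_size(1, 3)  # => 3: a 1 GiB volume needs a 3 GB space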
    def update_volume_stats(self):
        """Parse the JSON output of vgc-cluster to find space available."""
        params = [self.VGCCLUSTER, "host-storage", "--json"]
        try:
            out, unused = self._execute(*params, run_as_root=True)
            ret = json.loads(out)
            cap = ret["totalCapacityBytes"] // units.Gi
            used = ret["totalUsedBytes"] // units.Gi
            avail = cap - used
            if int(self.configuration.safe_get('hgst_redundancy')) == 1:
                cap = cap // 2
                avail = avail // 2
            # Reduce both by 1 GB due to BZ 28320
            if cap > 0:
                cap = cap - 1
            if avail > 0:
                avail = avail - 1
        except processutils.ProcessExecutionError as err:
            # Could be cluster still starting up, return unknown for now
            LOG.warning("Unable to poll cluster free space.")
            self._log_cli_err(err)
            cap = 'unknown'
            avail = 'unknown'
        self._stats['free_capacity_gb'] = avail
        self._stats['total_capacity_gb'] = cap
        self._stats['reserved_percentage'] = 0

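    # Worked example (made-up JSON values): totalCapacityBytes = 12 * 10**9
    # and totalUsedBytes = 2 * 10**9 floor-divide by units.Gi to cap = 11,
    # used = 1, avail = 10. With hgst_redundancy = 1 every byte is mirrored,
    # so cap and avail halve to 5 each, and the BZ 28320 workaround then
    # reduces both to 4.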
    def get_volume_stats(self, refresh=False):
        """Return Volume statistics, potentially cached copy."""
        if refresh:
            self.update_volume_stats()
        return self._stats

    def create_cloned_volume(self, volume, src_vref):
        """Create a cloned volume from an existing one.

        No cloning operation in the current release so simply copy using
        DD to a new space. This could be a lengthy operation.
        """
        # Connect to source volume
        volname = self._get_space_name(src_vref)
        self._add_cinder_apphost(volname)

        # Make new volume
        provider = self.create_volume(volume)
        self._add_cinder_apphost(provider['provider_id'])

        # And copy original into it...
        info = self._get_space_size_redundancy(volname)
        volutils.copy_volume(
            self.local_path(src_vref),
            "/dev/" + provider['provider_id'],
            info['sizeBytes'] // units.Mi,
            self.configuration.volume_dd_blocksize,
            execute=self._execute)

        # That's all, folks!
        return provider

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 self.local_path(volume),
                                 self.configuration.volume_dd_blocksize,
                                 size=volume['size'])

    def copy_volume_to_image(self, context, volume, image_service,
                             image_meta):
        """Copy the volume to the specified image."""
        image_utils.upload_volume(context,
                                  image_service,
                                  image_meta,
                                  self.local_path(volume))

    def delete_volume(self, volume):
        """Delete a Volume's underlying space."""
        volname = self._get_space_name(volume)
        if volname:
            params = [self.VGCCLUSTER, 'space-delete']
            params += ['-n', six.text_type(volname)]
            # This can fail benignly when we are deleting a snapshot
            try:
                self._execute(*params, run_as_root=True)
            except processutils.ProcessExecutionError as err:
                LOG.warning("Unable to delete space %(space)s",
                            {'space': volname})
                self._log_cli_err(err)
        else:
            # This can be benign when we are deleting a snapshot
            LOG.warning("Attempted to delete a space that's not there.")

    def _check_host_storage(self, server):
        if ":" not in server:
            msg = _("hgst_storage server %(svr)s not of format "
                    "<host>:<dev>") % {'svr': server}
            raise exception.VolumeDriverException(message=msg)
        h, b = server.split(":")
        try:
            params = [self.VGCCLUSTER, 'host-storage', '-h', h]
            self._execute(*params, run_as_root=True)
        except processutils.ProcessExecutionError as err:
            self._log_cli_err(err)
            msg = _("Storage host %(svr)s not detected, verify "
                    "name") % {'svr': six.text_type(server)}
            raise exception.VolumeDriverException(message=msg)

    def check_for_setup_error(self):
        """Throw an exception if configuration values/setup isn't okay."""
        # Verify vgc-cluster exists and is executable by cinder user
        try:
            params = [self.VGCCLUSTER, '--version']
            self._execute(*params, run_as_root=True)
        except processutils.ProcessExecutionError as err:
            self._log_cli_err(err)
            msg = _("Cannot run vgc-cluster command, please ensure software "
                    "is installed and permissions are set properly.")
            raise exception.VolumeDriverException(message=msg)

        # Checks the host is identified with the HGST domain, as well as
        # that vgcnode and vgcclustermgr services are running.
        self._vgc_host = None
        self._hostname()

        # Redundancy better be 0 or 1, otherwise no comprendo
        r = six.text_type(self.configuration.safe_get('hgst_redundancy'))
        if r not in ["0", "1"]:
            msg = _("hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in "
                    "cinder.conf.")
            raise exception.VolumeDriverException(message=msg)

        # Verify user and group exist or we can't connect volumes
        try:
            pwd.getpwnam(self.configuration.safe_get('hgst_space_user'))
            grp.getgrnam(self.configuration.safe_get('hgst_space_group'))
        except KeyError as err:
            msg = _("hgst_group %(grp)s and hgst_user %(usr)s must map to "
                    "valid users/groups in cinder.conf") % {
                'grp': self.configuration.safe_get('hgst_space_group'),
                'usr': self.configuration.safe_get('hgst_space_user')}
            raise exception.VolumeDriverException(message=msg)

        # Verify mode is a nicely formed octal or integer
        try:
            int(self.configuration.safe_get('hgst_space_mode'))
        except Exception as err:
            msg = _("hgst_space_mode must be an octal/int in cinder.conf")
            raise exception.VolumeDriverException(message=msg)

        # Validate network maps to something we know about
        try:
            params = [self.VGCCLUSTER, 'network-list']
            params += ['-N', self.configuration.safe_get('hgst_net')]
            self._execute(*params, run_as_root=True)
        except processutils.ProcessExecutionError as err:
            self._log_cli_err(err)
            msg = _("hgst_net %(net)s specified in cinder.conf not found "
                    "in cluster") % {
                'net': self.configuration.safe_get('hgst_net')}
            raise exception.VolumeDriverException(message=msg)

        # Storage servers require us to split them up and check each one
        sl = self.configuration.safe_get('hgst_storage_servers')
        if (sl is None) or (six.text_type(sl) == ""):
            msg = _("hgst_storage_servers must be defined in cinder.conf")
            raise exception.VolumeDriverException(message=msg)
        servers = sl.split(",")
        # Each server must be of the format <host>:<storage> w/host in domain
        for server in servers:
            self._check_host_storage(server)

        # We made it here, we should be good to go!
        return True

    def create_snapshot(self, snapshot):
        """Create a snapshot volume.

        We don't yet support snaps in SW so make a new volume and dd the
        source one into it. This could be a lengthy operation.
        """
        origvol = {}
        origvol['name'] = snapshot['volume_name']
        origvol['size'] = snapshot['volume_size']
        origvol['id'] = snapshot['volume_id']
        origvol['provider_id'] = snapshot.get('volume').get('provider_id')
        # Add me to the apphosts so I can see the volume
        self._add_cinder_apphost(self._get_space_name(origvol))

        # Make snapshot volume
        snapvol = {}
        snapvol['display_name'] = snapshot['display_name']
        snapvol['name'] = snapshot['name']
        snapvol['size'] = snapshot['volume_size']
        snapvol['id'] = snapshot['id']
        provider = self.create_volume(snapvol)
        # Create_volume attaches the volume to this host, ready to snapshot.
        # Copy it using dd for now, we don't have real snapshots
        # We need to copy the entire allocated volume space, Nova will allow
        # full access, even beyond requested size (when our volume is larger
        # due to our ~1B byte alignment or cluster makeup)
        info = self._get_space_size_redundancy(origvol['provider_id'])
        volutils.copy_volume(
            self.local_path(origvol),
            "/dev/" + provider['provider_id'],
            info['sizeBytes'] // units.Mi,
            self.configuration.volume_dd_blocksize,
            execute=self._execute)
        return provider

    def delete_snapshot(self, snapshot):
        """Delete a snapshot. For now, snapshots are full volumes."""
        self.delete_volume(snapshot)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from a snapshot, but snaps still full volumes."""
        return self.create_cloned_volume(volume, snapshot)

    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        We may not actually need to resize the space because its size is
        always rounded up to a function of the GiB/GB and number of storage
        nodes.
        """
        volname = self._get_space_name(volume)
        info = self._get_space_size_redundancy(volname)
        volnewbytes = new_size * units.Gi
        new_size_g = math.ceil(float(volnewbytes) / float(self.SPACEGB))
        wantedsize_g = self._adjust_size_g(new_size_g)
        havesize_g = (info['sizeBytes'] // self.SPACEGB)
        if havesize_g >= wantedsize_g:
            return  # Already big enough, happens with redundancy
        else:
            # Have to extend it
            delta = int(wantedsize_g - havesize_g)
            params = [self.VGCCLUSTER, 'space-extend']
            params += ['-n', six.text_type(volname)]
            params += ['-s', six.text_type(delta)]
            params += self._make_server_list()
            try:
                self._execute(*params, run_as_root=True)
            except processutils.ProcessExecutionError as err:
                msg = _("Error in space-extend for volume %(space)s with "
                        "%(size)d additional GB") % {'space': volname,
                                                     'size': delta}
                self._handle_blocked(err, msg)

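    # Worked example (illustrative numbers only): extending a volume to
    # 10 GiB on a 3-node cluster gives 10737418240 bytes, so
    # ceil(10737418240 / SPACEGB) = 11 GB, padded by _adjust_size_g() to a
    # multiple of 3 hosts = 12 GB wanted. If the space already spans 12 GB
    # (common with redundancy padding), extend_volume() returns untouched.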
    def initialize_connection(self, volume, connector):
        """Return connection information.

        Need to return noremovehost so that the Nova host
        doesn't accidentally remove us from the apphost list if it is
        running on the same host (like in devstack testing).
        """
        hgst_properties = {'name': volume['provider_id'],
                           'noremovehost': self._hostname()}
        return {'driver_volume_type': 'hgst',
                'data': hgst_properties}

    def local_path(self, volume):
        """Query the provider_id to figure out the proper devnode."""
        return "/dev/" + self._get_space_name(volume)

    def create_export(self, context, volume, connector):
        # Not needed for spaces
        pass

    def remove_export(self, context, volume):
        # Not needed for spaces
        pass

    def terminate_connection(self, volume, connector, **kwargs):
        # Not needed for spaces
        pass

    def ensure_export(self, context, volume):
        # Not needed for spaces
        pass

@ -1,17 +0,0 @@
===================
HGST Storage driver
===================

The HGST driver enables Cinder volumes using the HGST Flash Storage Suite.

Set the following in your ``cinder.conf`` file, and use the following
options to configure it.

.. code-block:: ini

   volume_driver = cinder.volume.drivers.hgst.HGSTDriver

.. config-table::
   :config-target: HGST Storage

   cinder.volume.drivers.hgst
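A minimal example backend section (the host and device names are
hypothetical; the option names and constraints are the ones validated by the
driver's ``check_for_setup_error``):

.. code-block:: ini

   volume_driver = cinder.volume.drivers.hgst.HGSTDriver
   # Cluster network, as listed by `vgc-cluster network-list`
   hgst_net = Net 1 (IPv4)
   # 0 = non-HA, 1 = HA (mirrored); halves reported capacity when 1
   hgst_redundancy = 0
   # User/group/mode applied to the /dev/<space> node
   hgst_space_user = cinder
   hgst_space_group = disk
   hgst_space_mode = 0600
   # Comma-separated <host>:<device> pairs
   hgst_storage_servers = stor1:gbd0,stor2:gbd0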
@ -42,7 +42,6 @@ Driver Configuration Reference
   drivers/emc-xtremio-driver
   drivers/drbd-driver
   drivers/fujitsu-eternus-dx-driver
   drivers/hgst-driver
   drivers/hpe-3par-driver
   drivers/hpe-lefthand-driver
   drivers/hp-msa-driver

@ -54,9 +54,6 @@ title=Dell EMC VNX Storage Driver (FC, iSCSI)
[driver.fujitsu_eternus]
title=Fujitsu ETERNUS Driver (FC, iSCSI)

[driver.hgst]
title=HGST Flash Storage Suite Driver (vgc)

[driver.hpe_3par]
title=HPE 3PAR Storage Driver (FC, iSCSI)

@ -221,7 +218,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=complete
driver.fujitsu_eternus=complete
driver.hgst=missing
driver.hpe_3par=complete
driver.hpe_lefthand=complete
driver.hpe_mmsa=complete
@ -288,7 +284,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=complete
driver.fujitsu_eternus=complete
driver.hgst=complete
driver.hpe_3par=complete
driver.hpe_lefthand=complete
driver.hpe_mmsa=complete
@ -355,7 +350,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=missing
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=missing
driver.hpe_lefthand=missing
driver.hpe_mmsa=missing
@ -423,7 +417,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=missing
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=complete
driver.hpe_lefthand=missing
driver.hpe_mmsa=missing
@ -492,7 +485,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=missing
driver.dell_emc_xtremio=missing
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=complete
driver.hpe_lefthand=complete
driver.hpe_mmsa=missing
@ -562,7 +554,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=complete
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=complete
driver.hpe_lefthand=complete
driver.hpe_mmsa=missing
@ -631,7 +622,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=complete
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=complete
driver.hpe_lefthand=complete
driver.hpe_mmsa=missing
@ -701,7 +691,6 @@ driver.dell_emc_vnx=complete
driver.dell_emc_vxflexos=missing
driver.dell_emc_xtremio=missing
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=missing
driver.hpe_lefthand=missing
driver.hpe_mmsa=missing
@ -771,7 +760,6 @@ driver.dell_emc_vnx=missing
driver.dell_emc_vxflexos=complete
driver.dell_emc_xtremio=complete
driver.fujitsu_eternus=missing
driver.hgst=missing
driver.hpe_3par=missing
driver.hpe_lefthand=complete
driver.hpe_mmsa=missing

@ -66,4 +66,5 @@ release.
* Rocky

  * CoprHD Storage Driver (FC, iSCSI, ScaleIO)

* Stein

  * HGST Flash Storage Suite Driver (vgc)

@ -0,0 +1,16 @@
---
upgrade:
  - |
    The HGST Flash Storage Suite driver has been removed after completing
    its deprecation period without a reliable 3rd Party CI system reporting.
    Customers using the HGST Flash Storage Suite driver should not upgrade
    Cinder without first migrating all volumes from their HGST backend
    to a supported storage backend. Failure to migrate volumes will
    result in no longer being able to access volumes backed by the HGST
    storage backend.
other:
  - |
    The HGST Flash Storage Suite driver was marked unsupported
    in the Rocky release because its 3rd Party CI system was not
    meeting Cinder's requirements. The CI system has not started
    reporting again, so the driver is now removed as of the Stein release.
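For example, each volume can be moved off the HGST backend ahead of the
upgrade with Cinder's generic volume migration (the volume ID and
destination backend name below are hypothetical):

.. code-block:: console

   $ cinder migrate <volume-id> <host>@<supported-backend>#<pool>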