From fe0822b1bcd42d3d77e06b3e383d5ee6b22f25e9 Mon Sep 17 00:00:00 2001 From: yangheng Date: Wed, 16 Dec 2020 09:42:56 +0000 Subject: [PATCH] Add Cinder driver for TOYOU ACS5000 * Supported Protocol - iSCSI * Supported Feature - Volume Create/Delete - Volume Attach/Detach - Snapshot Create/Delete - Create Volume from Snapshot - Get Volume Stats - Copy Image to Volume - Copy Volume to Image - Clone Volume - Extend Volume - Volume Migration ThirdPartySystems: TOYOU ACS5000 CI Change-Id: Ia7d2056e0a49032654812da74dac514dfac83529 Implement: blueprint toyou-acs5000-driver --- cinder/opts.py | 8 + .../unit/volume/drivers/toyou/__init__.py | 0 .../unit/volume/drivers/toyou/test_acs5000.py | 1341 +++++++++++++++++ cinder/volume/drivers/toyou/__init__.py | 0 .../volume/drivers/toyou/acs5000/__init__.py | 0 .../drivers/toyou/acs5000/acs5000_common.py | 828 ++++++++++ .../drivers/toyou/acs5000/acs5000_iscsi.py | 136 ++ .../drivers/toyou-acs5000-driver.rst | 74 + doc/source/reference/support-matrix.ini | 14 + ...toyou-acs5000-driver-16449ca18280def3.yaml | 5 + 10 files changed, 2406 insertions(+) create mode 100644 cinder/tests/unit/volume/drivers/toyou/__init__.py create mode 100644 cinder/tests/unit/volume/drivers/toyou/test_acs5000.py create mode 100644 cinder/volume/drivers/toyou/__init__.py create mode 100644 cinder/volume/drivers/toyou/acs5000/__init__.py create mode 100644 cinder/volume/drivers/toyou/acs5000/acs5000_common.py create mode 100644 cinder/volume/drivers/toyou/acs5000/acs5000_iscsi.py create mode 100644 doc/source/configuration/block-storage/drivers/toyou-acs5000-driver.rst create mode 100644 releasenotes/notes/bp-toyou-acs5000-driver-16449ca18280def3.yaml diff --git a/cinder/opts.py b/cinder/opts.py index 05236b03c61..0d12a59b722 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -158,6 +158,10 @@ from cinder.volume.drivers.stx import common as \ cinder_volume_drivers_stx_common from cinder.volume.drivers.synology import synology_common as \ 
cinder_volume_drivers_synology_synologycommon +from cinder.volume.drivers.toyou.acs5000 import acs5000_common as \ + cinder_volume_drivers_toyou_acs5000_acs5000common +from cinder.volume.drivers.toyou.acs5000 import acs5000_iscsi as \ + cinder_volume_drivers_toyou_acs5000_acs5000iscsi from cinder.volume.drivers.veritas_access import veritas_iscsi as \ cinder_volume_drivers_veritas_access_veritasiscsi from cinder.volume.drivers.vmware import vmdk as \ @@ -272,6 +276,10 @@ def list_opts(): cinder_volume_drivers_open_e_options.jdss_iscsi_opts, cinder_volume_drivers_open_e_options.jdss_volume_opts, cinder_volume_drivers_sandstone_sdsdriver.sds_opts, + cinder_volume_drivers_toyou_acs5000_acs5000common. + acs5000c_opts, + cinder_volume_drivers_toyou_acs5000_acs5000iscsi. + acs5000_iscsi_opts, cinder_volume_drivers_veritas_access_veritasiscsi.VA_VOL_OPTS, cinder_volume_manager.volume_manager_opts, cinder_wsgi_eventletserver.socket_opts, diff --git a/cinder/tests/unit/volume/drivers/toyou/__init__.py b/cinder/tests/unit/volume/drivers/toyou/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/volume/drivers/toyou/test_acs5000.py b/cinder/tests/unit/volume/drivers/toyou/test_acs5000.py new file mode 100644 index 00000000000..6e77040ce84 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/toyou/test_acs5000.py @@ -0,0 +1,1341 @@ +# Copyright 2020 toyou Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Testing for acs5000 san storage driver +""" + +import copy +import json +import random +import time +from unittest import mock + +from eventlet import greenthread +from oslo_concurrency import processutils +from oslo_config import cfg +from oslo_utils import excutils +from oslo_utils import importutils +from oslo_utils import units +import paramiko + +from cinder import context +from cinder import exception +from cinder import ssh_utils +from cinder.tests.unit import test +from cinder.tests.unit import utils as testutils +from cinder import utils as cinder_utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.toyou.acs5000 import acs5000_common +from cinder.volume.drivers.toyou.acs5000 import acs5000_iscsi + +POOLS_NAME = ['pool01', 'pool02'] +VOLUME_PRE = acs5000_common.VOLUME_PREFIX +# luns number only for test +LUN_NUMS_AVAILABLE = range(0, 5) +# snapshot count on a volume, only for test +SNAPSHOTS_A_VOLUME = 3 +# snapshot count on a system, only for test +SNAPSHOTS_ON_SYSTEM = 10 +# volume count on a pool, only for test +VOLUME_LIMIT_ON_POOL = 10 +# volume count on a pool, only for test +VOLUME_LIMIT_ON_SYSTEM = 16 +# volume count on a system, only for test + +CONF = cfg.CONF + + +class CommandSimulator(object): + def __init__(self, pool_name): + self._all_pools_name = {'acs5000_volpool_name': pool_name} + self._pools_list = { + 'pool01': { + 'name': 'pool01', + 'capacity': '799090409472', + 'free_capacity': '795869184000', + 'used_capacity': '3221225472', + 'total_volumes': 0}, + 'pool02': { + 'name': 'pool02', + 'capacity': '193273528320', + 'free_capacity': '190052302848', + 'used_capacity': '3221225472', + 'total_volumes': 0 + }} + self._volumes_list = {} + self._lun_maps_list = [] + self._snapshots_list = [] + self._controllers_list = [ + {'id': '1', + 'name': 'node1', + 'iscsi_name': 'iqn.2020-12.cn.com.toyou:' + 'disk-array-000f12345:dev0.ctr1', + 'WWNN': '200008CA45D33768', + 'status': 'online'}, + {'id': '2', + 
'name': 'node2', + 'iscsi_name': 'iqn.2020-04.cn.com.toyou:' + 'disk-array-000f12345:dev0.ctr2', + 'WWNN': '200008CA45D33768', + 'status': 'online'}] + self._system_info = {'version': '3.1.2.345678', + 'vendor': 'TOYOU', + 'system_name': 'Disk-Array', + 'system_id': 'TY123456789ABCDEF', + 'code_level': '1', + 'ip': '10.0.0.1'} + + self._error = { + 'success': (0, 'Success'), + 'unknown': (1, 'unknown error'), + 'pool_not_exist': (101, 'The pool does not exist ' + 'on the system.'), + 'pool_exceeds_size': (102, 'The pool cannot provide ' + 'more storage space'), + 'volume_not_exist': (303, 'The volume does not exist ' + 'on the system.'), + 'source_volume_not_exist': (304, 'A clone relation needs ' + 'a source volume.'), + 'target_volume_not_exist': (305, 'A clone relation needs ' + 'a target volume.'), + 'source_size_larger_target': (306, 'The source volume ' + 'must not be larger ' + 'than the target volume' + ' in a clone relation '), + 'volume_limit_pool': (307, 'A pool only supports 96 volumes'), + 'volume_limit_system': (308, 'A system only supports 96 volumes'), + 'volume_name_exist': (310, 'A volume with same name ' + 'already exists on the system.'), + 'volume_extend_min': (321, 'A volume capacity shall not be' + ' less than the current size'), + 'lun_not_exist': (401, 'The volume does not exist ' + 'on the system.'), + 'not_available_lun': (402, 'The system have no available lun.'), + 'snap_over_system': (503, 'The system snapshots maximum quantity ' + 'has been reached.'), + 'snap_over_volume': (504, 'A volume snapshots maximum quantity ' + 'has been reached.'), + 'snap_not_exist': (505, 'The snapshot does not exist ' + 'on the system.') + } + self._command_function = { + 'sshGetSystem': 'get_system', + 'sshGetIscsiConnect': 'get_ip_connect', + 'sshGetPoolInfo': 'get_pool_info', + 'sshGetVolume': 'get_volume', + 'sshGetCtrInfo': 'ls_ctr_info', + 'sshCreateVolume': 'create_volume', + 'sshDeleteVolume': 'delete_volume', + 'sshCinderExtendVolume': 
'extend_volume', + 'sshMkLocalClone': 'create_clone', + 'sshMkStartLocalClone': 'start_clone', + 'sshRemoveLocalClone': 'delete_clone', + 'sshMapVoltoHost': 'create_lun_map', + 'sshDeleteLunMap': 'delete_lun_map', + 'sshCreateSnapshot': 'create_snapshot', + 'sshDeleteSnapshot': 'delete_snapshot', + 'sshSetVolumeProperty': 'set_volume_property', + 'error_ssh': 'error_ssh' + } + self._volume_type = { + '0': 'RAID Volume', + '10': 'BACKUP' + } + + @staticmethod + def _json_return(rows=None, msg='', key=0): + json_data = {'key': key, + 'msg': msg, + 'arr': rows} + return (json.dumps(json_data), '') + + @staticmethod + def _create_id(lists, key='id'): + ids = [] + if isinstance(lists, list): + for v in lists: + ids.append(int(v[key])) + elif isinstance(lists, dict): + for v in lists.values(): + ids.append(int(v[key])) + new_id = 'ffffffffff' + while True: + new_id = str(random.randint(1000000000, 9999999999)) + if new_id not in ids: + break + return new_id + + def _clone_thread(self, vol_name, setting=None): + intval = 0.1 + loop_times = int(self._volumes_list[vol_name]['size_gb']) + chunk = int(100 / loop_times) + if setting: + for k, value in setting.items(): + for v in value: + self._volumes_list[k][v[0]] = v[1] + time.sleep(v[2]) + + self._volumes_list[vol_name]['status'] = 'Cloning' + while loop_times > 0: + # volumes may be deleted + if vol_name in self._volumes_list: + src_vol = self._volumes_list[vol_name] + else: + return + if src_vol['clone'] not in self._volumes_list: + self._volumes_list[vol_name]['status'] = 'Online' + self._volumes_list[vol_name]['r'] = '' + return + progress = src_vol['r'] + if not progress: + progress = 0 + src_vol['r'] = str(int(progress) + chunk) + loop_times -= 1 + self._volumes_list[vol_name] = src_vol + time.sleep(intval) + self._volumes_list[vol_name]['status'] = 'Online' + self._volumes_list[vol_name]['r'] = '' + + def execute_command(self, cmd_list, check_exit_code=True): + command = cmd_list[2] + if command in 
self._command_function: + command = self._command_function[command] + func = getattr(self, '_sim_' + command) + kwargs = {} + for i in range(3, len(cmd_list)): + if cmd_list[i].startswith('--'): + key = cmd_list[i][2:] + value = '' + if cmd_list[i + 1]: + value = cmd_list[i + 1] + i += 1 + if key in kwargs.keys(): + if not isinstance(kwargs[key], list): + kwargs[key] = [kwargs[key]] + + kwargs[key].append(value) + else: + kwargs[key] = value + try: + out, err = func(**kwargs) + return (out, err) + except Exception as e: + with excutils.save_and_reraise_exception(): + if check_exit_code: + raise processutils.ProcessExecutionError( + exit_code=1, + stdout='out', + stderr=e, + cmd=' '.join(cmd_list)) + + def _sim_get_system(self, **kwargs): + return self._json_return(self._system_info) + + def _sim_get_ip_connect(self, **kwargs): + target = kwargs['target'] + data = { + 'iscsi_name': [], + 'portal': [], + 'state': 'online' + } + ctr1_iscsi = ('iqn.2020-04.cn.com.toyou:disk-array-' + '000f12345:dev%s.ctr1' % target) + ctr2_iscsi = ('iqn.2020-04.cn.com.toyou:disk-array-' + '000f12345:dev%s.ctr2' % target) + ctr1_port1 = '10.23.45.67:3260' + ctr1_port2 = '10.23.45.68:3260' + ctr2_port1 = '10.23.45.69:3260' + ctr2_port2 = '10.23.45.70:3260' + data['iscsi_name'] = [ctr1_iscsi, ctr2_iscsi] + data['portal'] = [ctr1_port1, ctr1_port2, ctr2_port1, ctr2_port2] + + return self._json_return(data) + + def _sim_get_pool_info(self, **kwargs): + pool_name = kwargs['poolName'].strip('\'\"') + if pool_name in self._all_pools_name['acs5000_volpool_name']: + vol_len = 0 + for vol in self._volumes_list.values(): + if vol['poolname'] == pool_name: + vol_len += 1 + if pool_name in self._pools_list: + pool_data = self._pools_list[pool_name] + else: + pool_data = self._pools_list['pool01'] + pool_data['name'] = pool_name + pool_data['total_volumes'] = str(vol_len) + return self._json_return(pool_data) + else: + return self._json_return() + + def _sim_get_volume(self, **kwargs): + rows = [] + 
if isinstance(kwargs['name'], list): + volume_name = kwargs['name'] + else: + volume_name = [kwargs['name']] + for vol_name in volume_name: + if vol_name in self._volumes_list.keys(): + rows.append(self._volumes_list[vol_name]) + + return self._json_return(rows) + + def _sim_ls_ctr_info(self, **kwargs): + return self._json_return(self._controllers_list) + + def _sim_create_volume(self, **kwargs): + volume_name = kwargs['volumename'] + pool_name = kwargs['cinderPool'] + size = kwargs['volumesize'] + if volume_name in self._volumes_list: + return self._json_return( + msg=self._error['volume_name_exist'][1], + key=self._error['volume_name_exist'][0]) + elif len(self._volumes_list) >= VOLUME_LIMIT_ON_SYSTEM: + return self._json_return( + msg=self._error['volume_limit_system'][1], + key=self._error['volume_limit_system'][0]) + + volume_count_on_pool = 0 + for v in self._volumes_list.values(): + if v['poolname'] == pool_name: + volume_count_on_pool += 1 + if volume_count_on_pool >= VOLUME_LIMIT_ON_POOL: + return self._json_return( + msg=self._error['volume_limit_pool'][1], + key=self._error['volume_limit_pool'][0]) + avail_size = (int(self._pools_list[pool_name]['free_capacity']) + / units.Gi) + if int(size) > avail_size: + return self._json_return( + msg=self._error['pool_exceeds_size'][1], + key=self._error['pool_exceeds_size'][0]) + volume_info = {} + volume_info['id'] = self._create_id(self._volumes_list) + volume_info['name'] = volume_name + volume_info['size_gb'] = size + volume_info['status'] = 'Online' + volume_info['health'] = 'Optimal' + volume_info['r'] = '' + volume_info['poolname'] = pool_name + volume_info['has_clone'] = 0 + volume_info['clone'] = 'N/A' + volume_info['clone_snap'] = 'N/A' + type = kwargs['type'] + if type not in ('0', '10'): + type = '0' + volume_info['type'] = self._volume_type[type] + self._volumes_list[volume_info['name']] = volume_info + return self._json_return() + + def _sim_delete_volume(self, **kwargs): + vol_name = 
kwargs['cinderVolume'] + if vol_name in self._volumes_list: + del self._volumes_list[vol_name] + return self._json_return() + + def _sim_extend_volume(self, **kwargs): + vol_name = kwargs['cinderVolume'] + size = int(kwargs['extendsize']) + if vol_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + volume = self._volumes_list[vol_name] + curr_size = int(volume['size_gb']) + pool = self._pools_list[volume['poolname']] + avail_size = int(pool['free_capacity']) / units.Gi + if curr_size > size: + return self._json_return( + msg=self._error['volume_extend_min'][1], + key=self._error['volume_extend_min'][0]) + elif (size - curr_size) > avail_size: + return self._json_return( + msg=self._error['pool_exceeds_size'][1], + key=self._error['pool_exceeds_size'][0]) + self._volumes_list[vol_name]['size_gb'] = str(size) + return self._json_return() + + def _sim_create_clone(self, **kwargs): + src_name = kwargs['cinderVolume'] + tgt_name = kwargs['cloneVolume'] + src_exist = False + tgt_exist = False + for vol in self._volumes_list.values(): + if (vol['name'] == src_name + and vol['type'] == self._volume_type['0']): + src_exist = True + elif (vol['name'] == tgt_name + and vol['type'] == self._volume_type['10']): + tgt_exist = True + if src_exist and tgt_exist: + break + if not src_exist: + return self._json_return( + msg=self._error['source_volume_not_exist'][1], + key=self._error['source_volume_not_exist'][0]) + elif not tgt_exist: + return self._json_return( + msg=self._error['target_volume_not_exist'][1], + key=self._error['target_volume_not_exist'][0]) + src_size = int(self._volumes_list[src_name]['size_gb']) + tgt_size = int(self._volumes_list[tgt_name]['size_gb']) + if src_size > tgt_size: + return self._json_return( + msg=self._error['source_size_larger_target'][1], + key=self._error['source_size_larger_target'][0]) + tgt_volume = self._volumes_list[tgt_name] + 
self._volumes_list[src_name]['has_clone'] = 1 + self._volumes_list[src_name]['clone'] = tgt_volume['name'] + return self._json_return() + + def _sim_start_clone(self, **kwargs): + vol_name = kwargs['cinderVolume'] + snapshot = kwargs['snapshot'] + if len(snapshot) > 0: + snap_found = False + for snap in self._snapshots_list: + if snap['name'] == snapshot: + snap_found = True + break + if not snap_found: + return self._json_return( + msg=self._error['snap_not_exist'][1], + key=self._error['snap_not_exist'][0]) + else: + snapshot = ('clone-' + str(random.randint(100, 999))) + tmp_snap = {'volume': vol_name, + 'snapshot': snapshot} + self._sim_create_snapshot(**tmp_snap) + self._volumes_list[vol_name]['status'] = 'Queued' + self._volumes_list[vol_name]['clone_snap'] = snapshot + greenthread.spawn_n(self._clone_thread, vol_name) + return self._json_return() + + def _sim_delete_clone(self, **kwargs): + vol_name = kwargs['name'] + snapshot = kwargs['snapshot'] + if vol_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + self._volumes_list[vol_name]['has_clone'] = 0 + clone_volume = self._volumes_list[vol_name]['clone'] + self._volumes_list[vol_name]['clone'] = 'N/A' + clone_snap = self._volumes_list[vol_name]['clone_snap'] + self._volumes_list[vol_name]['clone_snap'] = 'N/A' + self._volumes_list[clone_volume]['type'] = self._volume_type['0'] + if len(snapshot) == 0: + for snap in self._snapshots_list: + if clone_snap == snap['name']: + self._snapshots_list.remove(snap) + break + return self._json_return() + + def _sim_create_lun_map(self, **kwargs): + volume_name = kwargs['cinderVolume'] + protocol = kwargs['protocol'] + hosts = kwargs['host'] + target = kwargs['target'] + if volume_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + if isinstance(hosts, str): + hosts = [hosts] + volume 
= self._volumes_list[volume_name] + available_luns = LUN_NUMS_AVAILABLE + existed_lun = -1 + for lun_row in self._lun_maps_list: + if lun_row['vd_id'] == volume['id']: + if lun_row['host'] in hosts: + existed_lun = lun_row['lun'] + hosts = [h for h in hosts if h != lun_row['host']] + else: + if (lun_row['protocol'] == protocol + and lun_row['target'] == target): + available_luns = [lun for lun in available_luns + if lun != lun_row['lun']] + if hosts and existed_lun > -1: + return self._json_return({'info': existed_lun}) + lun_info = {} + lun_info['vd_id'] = volume['id'] + lun_info['vd_name'] = volume['name'] + lun_info['protocol'] = protocol + lun_info['target'] = target + if existed_lun > -1: + lun_info['lun'] = existed_lun + elif available_luns: + lun_info['lun'] = available_luns[0] + else: + return self._json_return( + msg=self._error['not_available_lun'][1], + key=self._error['not_available_lun'][0]) + for host in hosts: + lun_info['id'] = self._create_id(self._lun_maps_list) + lun_info['host'] = host + self._lun_maps_list.append(copy.deepcopy(lun_info)) + return self._json_return({'info': lun_info['lun']}) + + def _sim_delete_lun_map(self, **kwargs): + map_exist = False + volume_name = kwargs['cinderVolume'] + protocol = kwargs['protocol'] + hosts = kwargs['cinderHost'] + target = kwargs['cinderTarget'] + if isinstance(hosts, str): + hosts = [hosts] + if volume_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + volume = self._volumes_list[volume_name] + lun_maps_list = self._lun_maps_list + self._lun_maps_list = [] + for row in lun_maps_list: + if (row['vd_id'] == volume['id'] + and row['protocol'] == protocol + and row['host'] in hosts + and row['target'] == target): + map_exist = True + else: + map_exist = False + self._lun_maps_list.append(row) + if not map_exist: + return self._json_return( + msg=self._error['lun_not_exist'][1], + 
key=self._error['lun_not_exist'][0]) + else: + return self._json_return() + + def _sim_create_snapshot(self, **kwargs): + volume_name = kwargs['volume'] + snapshot_name = kwargs['snapshot'] + if volume_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + if len(self._snapshots_list) >= SNAPSHOTS_ON_SYSTEM: + return self._json_return( + msg=self._error['snap_over_system'][1], + key=self._error['snap_over_system'][0]) + tag = -1 + volume_snap_count = 0 + for snap in self._snapshots_list: + if snap['vd_name'] == volume_name: + volume_snap_count += 1 + if int(snap['tag']) > tag: + tag = int(snap['tag']) + if volume_snap_count >= SNAPSHOTS_A_VOLUME: + return self._json_return( + msg=self._error['snap_over_volume'][1], + key=self._error['snap_over_volume'][0]) + volume = self._volumes_list[volume_name] + snapshot = {} + snapshot['id'] = self._create_id(self._snapshots_list) + snapshot['name'] = snapshot_name + snapshot['vd_id'] = volume['id'] + snapshot['vd_name'] = volume['name'] + snapshot['tag'] = tag + 1 + snapshot['create_time'] = '' + self._snapshots_list.append(snapshot) + return self._json_return() + + def _sim_delete_snapshot(self, **kwargs): + volume_name = kwargs['volume'] + snapshot_name = kwargs['snapshot'] + if volume_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + snap_exist = False + for snap in self._snapshots_list: + if (snap['vd_name'] == volume_name + and snap['name'] == snapshot_name): + snap_exist = True + self._snapshots_list.remove(snap) + break + if not snap_exist: + return self._json_return( + msg=self._error['snap_not_exist'][1], + key=self._error['snap_not_exist'][0]) + return self._json_return() + + def _sim_set_volume_property(self, **kwargs): + volume_name = kwargs['volume'] + kwargs.pop('volume') + if len(kwargs) == 0: + raise exception.InvalidInput( + 
reason=self._error['unknown'][1]) + new_name = volume_name + if 'new_name' in kwargs: + new_name = kwargs['new_name'] + kwargs.pop('new_name') + if volume_name not in self._volumes_list: + return self._json_return( + msg=self._error['volume_not_exist'][1], + key=self._error['volume_not_exist'][0]) + volume = self._volumes_list[volume_name] + volume['name'] = new_name + for k, v in kwargs.items(): + if k in volume: + volume[k] = v + else: + return ('', self._error['unknown'][1]) + + if volume_name != new_name: + del self._volumes_list[volume_name] + self._volumes_list[new_name] = volume + else: + self._volumes_list[volume_name] = volume + return self._json_return() + + def _sim_error_ssh(self, **kwargs): + error = kwargs['error'] + if error == 'json_error': + return ('This text is used for json errors.', '') + elif error == 'dict_error': + return (json.dumps('This text is used for dict errors.'), '') + elif error == 'keys_error': + keys = {'msg': 'This text is used for keys errors'} + return (json.dumps(keys), '') + elif error == 'key_false': + keys = {'msg': 'This text is used for key non-0 error', + 'key': 1, + 'arr': {}} + return (json.dumps(keys), '') + + +class Acs5000ISCSIFakeDriver(acs5000_iscsi.Acs5000ISCSIDriver): + def __init__(self, *args, **kwargs): + super(Acs5000ISCSIFakeDriver, self).__init__(*args, **kwargs) + + def set_fake_storage(self, fake): + self.fake_storage = fake + + def _run_ssh(self, cmd_list, check_exit_code=True): + cinder_utils.check_ssh_injection(cmd_list) + ret = self.fake_storage.execute_command(cmd_list, check_exit_code) + + return ret + + +class Acs5000ISCSIDriverTestCase(test.TestCase): + @mock.patch.object(time, 'sleep') + def setUp(self, mock_sleep): + super(Acs5000ISCSIDriverTestCase, self).setUp() + self.configuration = mock.Mock(conf.Configuration) + self.configuration.san_is_local = False + self.configuration.san_ip = '23.44.56.78' + self.configuration.san_login = 'cliuser' + self.configuration.san_password = 'clipassword' + 
self.configuration.acs5000_volpool_name = ['pool01'] + self.configuration.acs5000_target = 0 + self.iscsi_driver = Acs5000ISCSIFakeDriver( + configuration=self.configuration) + initiator = 'test.iqn.%s' % str(random.randint(10000, 99999)) + self._connector = {'ip': '1.234.56.78', + 'host': 'stack', + 'wwpns': [], + 'initiator': initiator} + self.sim = CommandSimulator(POOLS_NAME) + self.iscsi_driver.set_fake_storage(self.sim) + self.ctxt = context.get_admin_context() + + db_driver = CONF.db_driver + self.db = importutils.import_module(db_driver) + self.iscsi_driver.db = self.db + self.iscsi_driver.get_driver_options() + self.iscsi_driver.do_setup(None) + self.iscsi_driver.check_for_setup_error() + + def _create_volume(self, **kwargs): + prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], + 'size': 1, + 'volume_type_id': self.vt['id']} + for p in prop.keys(): + if p not in kwargs: + kwargs[p] = prop[p] + vol = testutils.create_volume(self.ctxt, **kwargs) + self.iscsi_driver.create_volume(vol) + return vol + + def _delete_volume(self, volume): + self.iscsi_driver.delete_volume(volume) + self.db.volume_destroy(self.ctxt, volume['id']) + + def _assert_lun_exists(self, vol_id, exists): + lun_maps = self.sim._lun_maps_list + is_lun_defined = False + luns = [] + volume_name = VOLUME_PRE + vol_id[-12:] + for lun in lun_maps: + if volume_name == lun['vd_name']: + luns.append(lun) + if len(luns): + is_lun_defined = True + self.assertEqual(exists, is_lun_defined) + return luns + + def test_validate_connector(self): + conn_neither = {'host': 'host'} + conn_iscsi = {'host': 'host', 'initiator': 'iqn.123'} + conn_fc = {'host': 'host', 'wwpns': 'fff123'} + conn_both = {'host': 'host', 'initiator': 'iqn.123', 'wwpns': 'fff123'} + + self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) + self.iscsi_driver.validate_connector(conn_iscsi) + self.iscsi_driver.validate_connector(conn_both) + self.assertRaises(exception.InvalidConnectorException, + 
self.iscsi_driver.validate_connector, conn_fc) + self.assertRaises(exception.InvalidConnectorException, + self.iscsi_driver.validate_connector, conn_neither) + + def test_initialize_connection(self): + volume = self._create_volume() + result = self.iscsi_driver.initialize_connection(volume, + self._connector) + ip_connect = self.iscsi_driver._cmd.get_ip_connect( + str(self.configuration.acs5000_target)) + self.assertEqual('iscsi', result['driver_volume_type']) + self.assertEqual(ip_connect['iscsi_name'], + result['data']['target_iqns']) + self.assertEqual(ip_connect['portal'], + result['data']['target_portals']) + self.assertEqual(volume['id'], result['data']['volume_id']) + self.assertEqual(len(ip_connect['portal']), + len(result['data']['target_portals'])) + self._delete_volume(volume) + + def test_initialize_connection_not_found(self): + prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], + 'size': 1, + 'volume_type_id': self.vt['id']} + vol = testutils.create_volume(self.ctxt, **prop) + self.assertRaises(exception.VolumeNotFound, + self.iscsi_driver.initialize_connection, + vol, self._connector) + self.db.volume_destroy(self.ctxt, vol['id']) + + def test_initialize_connection_failure(self): + volume_list = [] + for i in LUN_NUMS_AVAILABLE: + vol = self._create_volume() + self.iscsi_driver.initialize_connection( + vol, self._connector) + volume_list.append(vol) + + vol = self._create_volume() + self.assertRaises(exception.ISCSITargetAttachFailed, + self.iscsi_driver.initialize_connection, + vol, self._connector) + self._delete_volume(vol) + for v in volume_list: + self.iscsi_driver.terminate_connection( + v, self._connector) + self._delete_volume(v) + + def test_initialize_connection_multi_host(self): + connector = self._connector + initiator1 = ('test.iqn.%s' + % str(random.randint(10000, 99999))) + initiator2 = ('test.iqn.%s' + % str(random.randint(10000, 99999))) + connector['initiator'] = [initiator1, initiator2] + volume = self._create_volume() + 
self.iscsi_driver.initialize_connection( + volume, connector) + lun_maps = self._assert_lun_exists(volume['id'], True) + hosts = [] + for lun in lun_maps: + hosts.append(lun['host']) + self.assertIn(initiator1, hosts) + self.assertIn(initiator2, hosts) + self.iscsi_driver.terminate_connection( + volume, connector) + self._assert_lun_exists(volume['id'], False) + self._delete_volume(volume) + + def test_terminate_connection(self): + volume = self._create_volume() + self.iscsi_driver.initialize_connection(volume, + self._connector) + self.iscsi_driver.terminate_connection(volume, + self._connector) + self._delete_volume(volume) + + +class Acs5000CommonDriverTestCase(test.TestCase): + @mock.patch.object(time, 'sleep') + def setUp(self, mock_sleep): + super(Acs5000CommonDriverTestCase, self).setUp() + + self.configuration = mock.Mock(conf.Configuration) + self.configuration.san_is_local = False + self.configuration.san_ip = '23.44.56.78' + self.configuration.san_login = 'cliuser' + self.configuration.san_password = 'clipassword' + self.configuration.acs5000_volpool_name = POOLS_NAME + self.configuration.acs5000_target = 0 + self.configuration.acs5000_copy_interval = 0.01 + self.configuration.reserved_percentage = 0 + self._driver = Acs5000ISCSIFakeDriver( + configuration=self.configuration) + options = acs5000_iscsi.Acs5000ISCSIDriver.get_driver_options() + config = conf.Configuration(options, conf.SHARED_CONF_GROUP) + self.override_config('san_ip', '23.44.56.78', conf.SHARED_CONF_GROUP) + self.override_config('san_login', 'cliuser', conf.SHARED_CONF_GROUP) + self.override_config('san_password', 'clipassword', + conf.SHARED_CONF_GROUP) + self.override_config('acs5000_volpool_name', POOLS_NAME, + conf.SHARED_CONF_GROUP) + self.override_config('acs5000_target', 0, conf.SHARED_CONF_GROUP) + self._iscsi_driver = acs5000_iscsi.Acs5000ISCSIDriver( + configuration=config) + initiator = 'test.iqn.%s' % str(random.randint(10000, 99999)) + self._connector = {'ip': '1.234.56.78', 
+ 'host': 'stack', + 'wwpns': [], + 'initiator': initiator} + self.sim = CommandSimulator(POOLS_NAME) + self._driver.set_fake_storage(self.sim) + self.ctxt = context.get_admin_context() + + db_driver = CONF.db_driver + self.db = importutils.import_module(db_driver) + self._driver.db = self.db + self._driver.do_setup(None) + self._driver.check_for_setup_error() + + def _assert_vol_exists(self, name, exists): + volume = self._driver._cmd.get_volume(VOLUME_PRE + name[-12:]) + is_vol_defined = False + if volume: + is_vol_defined = True + self.assertEqual(exists, is_vol_defined) + return volume + + def _assert_snap_exists(self, name, exists): + snap_name = VOLUME_PRE + name[-12:] + snapshot_list = self.sim._snapshots_list + is_snap_defined = False + snapshot = {} + for snap in snapshot_list: + if snap['name'] == snap_name: + is_snap_defined = True + snapshot = snap + break + self.assertEqual(exists, is_snap_defined) + return snapshot + + def _create_volume(self, **kwargs): + prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], + 'size': 1, + 'volume_type_id': self.vt['id']} + driver = True + if 'driver' in kwargs: + if not kwargs['driver']: + driver = False + kwargs.pop('driver') + for p in prop.keys(): + if p not in kwargs: + kwargs[p] = prop[p] + vol = testutils.create_volume(self.ctxt, **kwargs) + if driver: + self._driver.create_volume(vol) + return vol + + def _delete_volume(self, volume, driver=True): + if driver: + self._driver.delete_volume(volume) + self.db.volume_destroy(self.ctxt, volume['id']) + + def _create_snapshot(self, vol_id, driver=True): + snap = testutils.create_snapshot(self.ctxt, vol_id) + if driver: + self._driver.create_snapshot(snap) + return snap + + def _delete_snapshot(self, snap, driver=True): + if driver: + self._driver.delete_snapshot(snap) + self.db.snapshot_destroy(self.ctxt, snap['id']) + + def test_run_ssh_failure(self): + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._build_pool_stats, + 'error_pool') + ssh_cmd = 
['cinder', 'storage', 'error_ssh', '--error', 'json_error'] + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._cmd.run_ssh_info, ssh_cmd) + ssh_cmd = ['cinder', 'storage', 'error_ssh', '--error', 'dict_error'] + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._cmd.run_ssh_info, ssh_cmd) + ssh_cmd = ['cinder', 'storage', 'error_ssh', '--error', 'keys_error'] + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._cmd.run_ssh_info, ssh_cmd) + ssh_cmd = ['cinder', 'storage', 'error_ssh', '--error', 'key_false'] + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._cmd.run_ssh_info, ssh_cmd) + + @mock.patch.object(ssh_utils, 'SSHPool') + @mock.patch.object(processutils, 'ssh_execute') + def test_run_ssh_with_ip(self, mock_ssh_execute, mock_ssh_pool): + ssh_cmd = ['cinder', 'storage', 'run_ssh'] + self._iscsi_driver._run_ssh(ssh_cmd) + mock_ssh_pool.assert_called_once_with( + self._iscsi_driver.configuration.san_ip, + self._iscsi_driver.configuration.san_ssh_port, + self._iscsi_driver.configuration.ssh_conn_timeout, + self._iscsi_driver.configuration.san_login, + password=self._iscsi_driver.configuration.san_password, + min_size=self._iscsi_driver.configuration.ssh_min_pool_conn, + max_size=self._iscsi_driver.configuration.ssh_max_pool_conn) + + mock_ssh_pool.side_effect = [paramiko.SSHException, mock.MagicMock()] + self._iscsi_driver._run_ssh(ssh_cmd) + mock_ssh_pool.assert_called_once_with( + self._iscsi_driver.configuration.san_ip, + self._iscsi_driver.configuration.san_ssh_port, + self._iscsi_driver.configuration.ssh_conn_timeout, + self._iscsi_driver.configuration.san_login, + password=self._iscsi_driver.configuration.san_password, + min_size=self._iscsi_driver.configuration.ssh_min_pool_conn, + max_size=self._iscsi_driver.configuration.ssh_max_pool_conn) + + @mock.patch.object(ssh_utils, 'SSHPool') + @mock.patch.object(processutils, 'ssh_execute') + def 
test_run_ssh_with_exception(self, mock_ssh_execute, mock_ssh_pool): + mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, + mock.MagicMock()] + self.override_config('acs5000_volpool_name', None, + self._iscsi_driver.configuration.config_group) + ssh_cmd = ['cinder', 'storage', 'run_ssh'] + self.assertRaises(processutils.ProcessExecutionError, + self._iscsi_driver._run_ssh, ssh_cmd) + + def test_do_setup(self): + system_info = self.sim._system_info + self.assertEqual(system_info['vendor'], self._driver._state['vendor']) + self.assertIn('iSCSI', self._driver._state['enabled_protocols']) + self.assertEqual(2, len(self._driver._state['storage_nodes'])) + + def test_do_setup_no_pools(self): + self._driver.pools = ['pool_error'] + self.assertRaises(exception.InvalidInput, + self._driver.do_setup, None) + + def test_create_volume(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + self._delete_volume(vol) + + def test_create_volume_same_name(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.create_volume, vol) + self._delete_volume(vol) + + def test_create_volume_size_exceeds_limit(self): + prop = { + 'host': 'stack@ty2#%s' % POOLS_NAME[1], + 'size': 200, + 'driver': False + } + self._driver.get_volume_stats() + vol = self._create_volume(**prop) + self._assert_vol_exists(vol['id'], False) + self.assertRaises(exception.VolumeSizeExceedsLimit, + self._driver.create_volume, + vol) + self._delete_volume(vol, False) + + def test_create_volume_number_exceeds_pool_limit(self): + volume_list = [] + for i in range(VOLUME_LIMIT_ON_POOL): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + volume_list.append(vol) + vol = self._create_volume(driver=False) + self.assertRaises(exception.VolumeLimitExceeded, + self._driver.create_volume, + vol) + self._delete_volume(vol, False) + for v in volume_list: + 
self._delete_volume(v) + + def test_create_volume_number_exceeds_system_limit(self): + volume_list = [] + volume_count_on_pool = int(VOLUME_LIMIT_ON_SYSTEM + / len(POOLS_NAME)) + for i in range(volume_count_on_pool): + for x in range(len(POOLS_NAME)): + vol = self._create_volume( + host='stack@ty1#%s' % POOLS_NAME[x]) + self._assert_vol_exists(vol['id'], True) + volume_list.append(vol) + vol = self._create_volume(driver=False) + self.assertRaises(exception.VolumeLimitExceeded, + self._driver.create_volume, + vol) + self._delete_volume(vol, False) + for v in volume_list: + self._delete_volume(v) + + def test_delete_volume(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + self._delete_volume(vol) + self._assert_vol_exists(vol['id'], False) + + def test_create_snapshot(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + snap = self._create_snapshot(vol['id']) + self._assert_snap_exists(snap['id'], True) + self._delete_snapshot(snap) + self._delete_volume(vol) + + def test_create_snapshot_exceed_limit(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + snapshot_list = [] + for i in range(SNAPSHOTS_A_VOLUME): + snap = self._create_snapshot(vol['id']) + self._assert_snap_exists(snap['id'], True) + snapshot_list.append(snap) + snap = self._create_snapshot(vol['id'], False) + self.assertRaises(exception.SnapshotLimitExceeded, + self._driver.create_snapshot, snap) + self._delete_snapshot(snap, False) + vol_list = [vol] + snap_count = SNAPSHOTS_A_VOLUME + while snap_count < SNAPSHOTS_ON_SYSTEM: + vol = self._create_volume() + vol_list.append(vol) + for x in range(SNAPSHOTS_A_VOLUME): + snap = self._create_snapshot(vol['id']) + self._assert_snap_exists(snap['id'], True) + snapshot_list.append(snap) + snap_count += 1 + if snap_count >= SNAPSHOTS_ON_SYSTEM: + break + + vol = self._create_volume() + vol_list.append(vol) + snap = self._create_snapshot(vol['id'], False) + 
self.assertRaises(exception.SnapshotLimitExceeded, + self._driver.create_snapshot, snap) + for sp in snapshot_list: + self._delete_snapshot(sp) + for vol in vol_list: + self._delete_volume(vol) + + def test_delete_snapshot(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + snap = self._create_snapshot(vol['id']) + self._assert_snap_exists(snap['id'], True) + self._delete_snapshot(snap) + self._assert_snap_exists(snap['id'], False) + self._delete_volume(vol) + + def test_delete_snapshot_not_found(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + snap = self._create_snapshot(vol['id'], False) + self._assert_snap_exists(snap['id'], False) + self.assertRaises(exception.SnapshotNotFound, + self._driver.delete_snapshot, + snap) + self._delete_snapshot(snap, False) + self._delete_volume(vol) + + def test_create_volume_from_snapshot(self): + prop = {'size': 2} + vol = self._create_volume(**prop) + self._assert_vol_exists(vol['id'], True) + snap = self._create_snapshot(vol['id']) + self._assert_snap_exists(snap['id'], True) + prop['driver'] = False + new_vol = self._create_volume(**prop) + self._driver.create_volume_from_snapshot(new_vol, snap) + new_volume = self._assert_vol_exists(new_vol['id'], True) + self.assertEqual(1, len(new_volume)) + self.assertEqual('2', new_volume[0]['size_gb']) + self.assertEqual('RAID Volume', new_volume[0]['type']) + self._delete_volume(new_vol) + self._delete_snapshot(snap) + self._delete_volume(vol) + + def test_create_volume_from_snapshot_not_found(self): + vol = self._create_volume() + self._assert_vol_exists(vol['id'], True) + snap = self._create_snapshot(vol['id'], False) + self._assert_snap_exists(snap['id'], False) + new_vol = self._create_volume(driver=False) + self._assert_vol_exists(new_vol['id'], False) + self.assertRaises(exception.SnapshotNotFound, + self._driver.create_volume_from_snapshot, + new_vol, snap) + self._delete_volume(new_vol, False) + 
self._delete_snapshot(snap, False) + self._delete_volume(vol) + + def test_create_snapshot_volume_not_found(self): + vol = self._create_volume(driver=False) + self._assert_vol_exists(vol['id'], False) + self.assertRaises(exception.VolumeNotFound, + self._create_snapshot, vol['id']) + self._delete_volume(vol, driver=False) + + def test_create_cloned_volume(self): + src_volume = self._create_volume() + self._assert_vol_exists(src_volume['id'], True) + tgt_volume = self._create_volume(driver=False) + self._driver.create_cloned_volume(tgt_volume, src_volume) + volume = self._assert_vol_exists(tgt_volume['id'], True) + self.assertEqual(1, len(volume)) + self.assertEqual('RAID Volume', volume[0]['type']) + self._delete_volume(src_volume) + self._delete_volume(tgt_volume) + + def test_create_cloned_volume_with_size(self): + prop = {'size': 2} + src_volume = self._create_volume(**prop) + volume = self._assert_vol_exists(src_volume['id'], True) + prop['driver'] = False + tgt_volume = self._create_volume(**prop) + self._driver.create_cloned_volume(tgt_volume, src_volume) + clone_volume = self._assert_vol_exists(tgt_volume['id'], True) + self.assertEqual(1, len(volume)) + self.assertEqual(1, len(clone_volume)) + self.assertEqual('RAID Volume', volume[0]['type']) + self.assertEqual('RAID Volume', clone_volume[0]['type']) + self.assertEqual('2', volume[0]['size_gb']) + self.assertEqual('2', clone_volume[0]['size_gb']) + self._delete_volume(src_volume) + self._delete_volume(tgt_volume) + + def test_create_cloned_volume_size_failure(self): + prop = {'size': 10} + src_volume = self._create_volume(**prop) + self._assert_vol_exists(src_volume['id'], True) + prop = {'size': 5, 'driver': False} + tgt_volume = self._create_volume(**prop) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.create_cloned_volume, + tgt_volume, src_volume) + self._assert_vol_exists(tgt_volume['id'], False) + self._delete_volume(src_volume) + self._delete_volume(tgt_volume, False) + + 
def test_create_cloned_volume_failure(self): + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._local_clone_copy, + None, None) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._local_clone_copy, + 'src_test', 'tgt_test') + src_volume = self._create_volume() + src_name = VOLUME_PRE + src_volume['id'][-12:] + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._local_clone_copy, + src_name, 'tgt_test') + self._delete_volume(src_volume) + + def test_wait_volume_copy(self): + src_volume = self._create_volume(size=2) + src_info = self._assert_vol_exists(src_volume['id'], True)[0] + tgt_volume = self._create_volume(size=2) + tgt_info = self._assert_vol_exists(tgt_volume['id'], True)[0] + self._driver._cmd.set_volume_property( + src_info['name'], {'status': 'Queued', + 'clone_snap': tgt_info['name']}) + self._driver._cmd.set_volume_property(tgt_info['name'], + {'type': 'BACKUP'}) + src_name = VOLUME_PRE + src_volume['id'][-12:] + tgt_name = VOLUME_PRE + tgt_volume['id'][-12:] + self._driver._cmd.create_clone(src_name, tgt_name) + tgt_set = { + tgt_name: [('status', 'Erasing', 0.2)], + src_name: [('status', 'Erasing', 0.2)], + } + greenthread.spawn_n(self.sim._clone_thread, + src_name, tgt_set) + ret = self._driver._wait_volume_copy(src_name, tgt_name, + 'test_func', 'test_action') + self.assertTrue(ret) + self._driver._cmd.set_volume_property( + src_info['name'], {'status': 'error', + 'clone_snap': tgt_info['name']}) + ret = self._driver._wait_volume_copy(src_name, tgt_name, + 'test_func', 'test_action') + self.assertFalse(ret) + self._driver._cmd.set_volume_property( + src_info['name'], {'status': 'Online', + 'clone_snap': tgt_info['name']}) + self._delete_volume(tgt_volume) + self._assert_vol_exists(tgt_volume['id'], False) + ret = self._driver._wait_volume_copy(src_name, tgt_name, + 'test_func', 'test_action') + self.assertFalse(ret) + self._driver._cmd.set_volume_property(src_info['name'], + {'type': 
'BACKUP'}) + ret = self._driver._wait_volume_copy(tgt_name, 'backup_test', + 'test_func', 'test_action') + self.assertFalse(ret) + self._delete_volume(src_volume) + + def test_extend_volume(self): + volume = self._create_volume(size=10) + vol_info = self._assert_vol_exists(volume['id'], True) + self.assertEqual('10', vol_info[0]['size_gb']) + self._driver.extend_volume(volume, '100') + extend_vol = self._assert_vol_exists(volume['id'], True) + self.assertEqual('100', extend_vol[0]['size_gb']) + self._delete_volume(volume) + + def test_extend_volume_not_found(self): + volume = self._create_volume(driver=False) + self.assertRaises(exception.VolumeNotFound, + self._driver.extend_volume, + volume, 10) + self._delete_volume(volume, False) + + def test_extend_volume_size_less(self): + volume = self._create_volume(size=100) + vol_info = self._assert_vol_exists(volume['id'], True) + self.assertEqual('100', vol_info[0]['size_gb']) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.extend_volume, + volume, '10') + self._delete_volume(volume) + + def test_extend_volume_size_exceeds_limit(self): + host = 'stack@ty2#%s' % POOLS_NAME[1] + self._driver.get_volume_stats() + volume = self._create_volume(size=10, host=host) + vol_info = self._assert_vol_exists(volume['id'], True) + self.assertEqual('10', vol_info[0]['size_gb']) + self.assertEqual(POOLS_NAME[1], vol_info[0]['poolname']) + self.assertRaises(exception.VolumeSizeExceedsLimit, + self._driver.extend_volume, + volume, '200') + self._delete_volume(volume) + + def test_migrate_volume_same_pool(self): + host = 'stack@ty1#%s' % POOLS_NAME[0] + volume = self._create_volume(host=host) + target_host = { + 'host': 'stack_new@ty1#%s' % POOLS_NAME[0], + 'capabilities': { + 'system_id': self.sim._system_info['system_id'], + 'pool_name': POOLS_NAME[0] + } + } + ret = self._driver.migrate_volume(self.ctxt, volume, target_host) + self.assertEqual((True, None), ret) + + def test_migrate_volume_different_system(self): 
+ host = 'stack@ty1#%s' % POOLS_NAME[0] + volume = self._create_volume(host=host) + target_host = { + 'host': 'stack_new@ty1#%s' % POOLS_NAME[0], + 'capabilities': { + 'system_id': 'test_system_id', + 'pool_name': POOLS_NAME[0] + } + } + ret = self._driver.migrate_volume(self.ctxt, volume, target_host) + self.assertEqual((False, None), ret) + target_host = { + 'host': 'stack_new@ty1#%s' % POOLS_NAME[0], + 'capabilities': { + 'pool_name': POOLS_NAME[0] + } + } + ret = self._driver.migrate_volume(self.ctxt, volume, target_host) + self.assertEqual((False, None), ret) + + def test_migrate_volume_same_system_different_pool(self): + host = 'stack@ty1#%s' % POOLS_NAME[0] + volume = self._create_volume(host=host, size=2) + target_host = { + 'host': 'stack_new@ty1#%s' % POOLS_NAME[1], + 'capabilities': { + 'system_id': self.sim._system_info['system_id'], + 'pool_name': POOLS_NAME[1] + } + } + ret = self._driver.migrate_volume(self.ctxt, volume, target_host) + self.assertEqual((True, None), ret) + vol_info = self._assert_vol_exists(volume['id'], True) + self.assertEqual(POOLS_NAME[1], vol_info[0]['poolname']) + self.assertEqual('2', vol_info[0]['size_gb']) + + def test_get_volume_stats(self): + self.assertEqual({}, self._driver._stats) + self._driver.get_volume_stats() + stats = self._driver._stats + system_info = self.sim._system_info + self.assertEqual(system_info['vendor'], stats['vendor_name']) + + def test_get_volume_none(self): + ret = self._driver._cmd.get_volume('') + self.assertEqual([], ret) + ret = self._driver._cmd.get_volume('test_volume') + self.assertEqual([], ret) + + def test_check_for_setup_error_failure(self): + self._driver._state['system_name'] = None + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.check_for_setup_error) + self._driver.do_setup(None) + self._driver._state['system_id'] = None + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.check_for_setup_error) + self._driver.do_setup(None) + 
self._driver._state['storage_nodes'] = [] + self.assertRaises(exception.VolumeDriverException, + self._driver.check_for_setup_error) + self._driver.do_setup(None) + self._driver._state['enabled_protocols'] = set() + self.assertRaises(exception.InvalidInput, + self._driver.check_for_setup_error) + self._driver.do_setup(None) + self._driver.configuration.san_password = None + self.assertRaises(exception.InvalidInput, + self._driver.check_for_setup_error) + self._driver.do_setup(None) + + def test_build_pool_stats_no_pool(self): + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._build_pool_stats, + 'pool_test') + + def test_set_volume_property_failure(self): + volume = self._create_volume() + self._assert_vol_exists(volume['id'], True) + volume_name = VOLUME_PRE + volume['id'][-12:] + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._cmd.set_volume_property, + volume_name, {'error_key': 'error'}) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver._cmd.set_volume_property, + volume_name, {}) + self._delete_volume(volume) diff --git a/cinder/volume/drivers/toyou/__init__.py b/cinder/volume/drivers/toyou/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/toyou/acs5000/__init__.py b/cinder/volume/drivers/toyou/acs5000/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/toyou/acs5000/acs5000_common.py b/cinder/volume/drivers/toyou/acs5000/acs5000_common.py new file mode 100644 index 00000000000..f69bf1c0f20 --- /dev/null +++ b/cinder/volume/drivers/toyou/acs5000/acs5000_common.py @@ -0,0 +1,828 @@ +# Copyright 2020 toyou Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +acs5000 san for common driver +It will be called by iSCSI driver +""" + +import json +import random + +from eventlet import greenthread +from oslo_concurrency import processutils +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units +import paramiko + +from cinder import coordination +from cinder import exception +from cinder.i18n import _ +from cinder import ssh_utils +from cinder import utils as cinder_utils +from cinder.volume import driver +from cinder.volume.drivers.san import san +from cinder.volume import volume_utils + +VOLUME_PREFIX = 'cinder-' + +LOG = logging.getLogger(__name__) +acs5000c_opts = [ + cfg.ListOpt( + 'acs5000_volpool_name', + default=['pool01'], + help='Comma separated list of storage system storage ' + 'pools for volumes.'), + cfg.IntOpt( + 'acs5000_copy_interval', + default=5, + min=3, + max=100, + help='When volume copy task is going on,refresh volume ' + 'status interval') +] +CONF = cfg.CONF +CONF.register_opts(acs5000c_opts) + + +class Command(object): + + def __init__(self, run_ssh): + self._ssh = run_ssh + + def _run_ssh(self, ssh_cmd): + try: + return self._ssh(ssh_cmd) + except processutils.ProcessExecutionError as e: + msg = (_('CLI Exception output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s.') % + {'cmd': ssh_cmd, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def run_ssh_info(self, ssh_cmd, key=False): + """Run an SSH command and return parsed output.""" 
+ out, err = self._run_ssh(ssh_cmd) + if len(err): + msg = (_('Execute command %(cmd)s failed, ' + 'out: %(out)s, err: %(err)s.') % + {'cmd': ' '.join(ssh_cmd), + 'out': str(out), + 'err': str(err)}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + try: + info = json.loads(out) + except json.JSONDecodeError as e: + msg = (_('Parse response error from CLI command %(cmd)s, ' + 'out: %(out)s, err: %(err)s') % + {'cmd': ' '.join(ssh_cmd), + 'out': str(out), + 'err': e}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not isinstance(info, dict): + msg = (_('Unexpected format from CLI command %(cmd)s, ' + 'result: %(info)s.') % + {'cmd': ' '.join(ssh_cmd), + 'info': str(info)}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + require = ('key', 'msg', 'arr') + require_valid = True + for r in require: + if r not in info.keys(): + require_valid = False + break + if not require_valid: + msg = (_('Unexpected response from CLI command %(cmd)s, ' + 'require \'key\' \'msg\' \'arr\'. 
out: %(info)s.') % + {'cmd': ' '.join(ssh_cmd), + 'info': str(info)}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif int(info['key']) != 0: + msg = (_('Unexpected error output from CLI command %(cmd)s, ' + 'key: %(key)s, msg: %(msg)s.') % + {'cmd': ' '.join(ssh_cmd), + 'msg': info['msg'], + 'key': info['key']}) + LOG.error(msg) + if not key: + raise exception.VolumeBackendAPIException(data=msg) + if key: + info['key'] = int(info['key']) + return info + else: + return info['arr'] + + def get_system(self): + ssh_cmd = ['cinder', 'Storage', 'sshGetSystem'] + return self.run_ssh_info(ssh_cmd) + + def get_ip_connect(self, target): + ssh_cmd = ['cinder', + 'Storage', + 'sshGetIscsiConnect', + '--target', + target] + return self.run_ssh_info(ssh_cmd) + + def get_pool_info(self, pool): + ssh_cmd = ['cinder', + 'Storage', + 'sshGetPoolInfo', + '--poolName', + pool] + return self.run_ssh_info(ssh_cmd) + + def get_volume(self, volume): + ssh_cmd = ['cinder', + 'Storage', + 'sshGetVolume'] + if not volume: + return [] + elif isinstance(volume, str): + ssh_cmd.append('--name') + ssh_cmd.append(volume) + elif isinstance(volume, list): + for vol in volume: + ssh_cmd.append('--name') + ssh_cmd.append(vol) + result = self.run_ssh_info(ssh_cmd) + if not result: + return [] + else: + return result + + def ls_ctr_info(self): + ssh_cmd = ['cinder', 'Storage', 'sshGetCtrInfo'] + ctrs = self.run_ssh_info(ssh_cmd) + nodes = {} + for node_data in ctrs: + nodes[node_data['id']] = { + 'id': node_data['id'], + 'name': node_data['name'], + 'iscsi_name': node_data['iscsi_name'], + 'WWNN': node_data['WWNN'], + 'WWPN': [], + 'status': node_data['status'], + 'ipv4': [], + 'ipv6': [], + 'enabled_protocols': [] + } + return nodes + + def create_volume(self, name, size, pool_name, type='0'): + ssh_cmd = ['cinder', + 'Storage', + 'sshCreateVolume', + '--volumesize', + size, + '--volumename', + name, + '--cinderPool', + pool_name, + '--type', + type] + return 
self.run_ssh_info(ssh_cmd, key=True) + + def delete_volume(self, volume): + ssh_cmd = ['cinder', + 'Storage', + 'sshDeleteVolume', + '--cinderVolume', + volume] + return self.run_ssh_info(ssh_cmd) + + def extend_volume(self, volume, size): + ssh_cmd = ['cinder', + 'Storage', + 'sshCinderExtendVolume', + '--cinderVolume', + volume, + '--extendunit', + 'gb', + '--extendsize', + str(size)] + return self.run_ssh_info(ssh_cmd, key=True) + + def create_clone(self, volume_name, clone_name): + ssh_cmd = ['cinder', + 'Storage', + 'sshMkLocalClone', + '--cinderVolume', + volume_name, + '--cloneVolume', + clone_name] + return self.run_ssh_info(ssh_cmd, key=True) + + def start_clone(self, volume_name, snapshot=''): + ssh_cmd = ['cinder', + 'Storage', + 'sshMkStartLocalClone', + '--cinderVolume', + volume_name, + '--snapshot', + snapshot] + return self.run_ssh_info(ssh_cmd, key=True) + + def delete_clone(self, volume_name, snapshot=''): + ssh_cmd = ['cinder', + 'Storage', + 'sshRemoveLocalClone', + '--name', + volume_name, + '--snapshot', + snapshot] + return self.run_ssh_info(ssh_cmd, key=True) + + def create_lun_map(self, volume_name, protocol, host, target=None): + """Map volume to host.""" + LOG.debug('enter: create_lun_map volume %s.', volume_name) + ssh_cmd = ['cinder', + 'Storage', + 'sshMapVoltoHost', + '--cinderVolume', + volume_name, + '--protocol', + protocol] + if isinstance(host, list): + for ht in host: + ssh_cmd.append('--host') + ssh_cmd.append(ht) + else: + ssh_cmd.append('--host') + ssh_cmd.append(str(host)) + if target: + ssh_cmd.append('--target') + ssh_cmd.append(target) + return self.run_ssh_info(ssh_cmd, key=True) + + def delete_lun_map(self, volume_name, protocol, host, target=''): + ssh_cmd = ['cinder', + 'Storage', + 'sshDeleteLunMap', + '--cinderVolume', + volume_name, + '--protocol', + protocol] + if isinstance(host, list): + for ht in host: + ssh_cmd.append('--cinderHost') + ssh_cmd.append(ht) + else: + ssh_cmd.append('--cinderHost') + 
ssh_cmd.append(str(host)) + if target: + ssh_cmd.append('--cinderTarget') + ssh_cmd.append(target) + return self.run_ssh_info(ssh_cmd, key=True) + + def create_snapshot(self, volume_name, snapshot_name): + ssh_cmd = ['cinder', + 'Storage', + 'sshCreateSnapshot', + '--volume', + volume_name, + '--snapshot', + snapshot_name] + return self.run_ssh_info(ssh_cmd, key=True) + + def delete_snapshot(self, volume_name, snapshot_name): + ssh_cmd = ['cinder', + 'Storage', + 'sshDeleteSnapshot', + '--volume', + volume_name, + '--snapshot', + snapshot_name] + return self.run_ssh_info(ssh_cmd, key=True) + + def set_volume_property(self, name, setting): + ssh_cmd = ['cinder', + 'Storage', + 'sshSetVolumeProperty', + '--volume', + name] + for key, value in setting.items(): + ssh_cmd.extend(['--' + key, value]) + return self.run_ssh_info(ssh_cmd, key=True) + + +class Acs5000CommonDriver(san.SanDriver, + driver.MigrateVD, + driver.CloneableImageVD): + """TOYOU ACS5000 storage abstract common class. + + .. 
code-block:: none + + Version history: + 1.0.0 - Initial driver + + """ + VENDOR = 'TOYOU' + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(Acs5000CommonDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(acs5000c_opts) + self._backend_name = self.configuration.safe_get('volume_backend_name') + self.pools = self.configuration.acs5000_volpool_name + self._cmd = Command(self._run_ssh) + self.protocol = None + self._state = {'storage_nodes': {}, + 'enabled_protocols': set(), + 'system_name': None, + 'system_id': None, + 'code_level': None, + 'version': None} + + @cinder_utils.trace_method + def do_setup(self, ctxt): + """Check that we have all configuration details from the storage.""" + self._validate_pools_exist() + + self._state.update(self._cmd.get_system()) + + self._state['storage_nodes'] = self._cmd.ls_ctr_info() + ports = self._cmd.get_ip_connect( + str(self.configuration.acs5000_target)) + if ports['iscsi_name']: + self._state['enabled_protocols'].add('iSCSI') + iscsi_name = ' '.join(ports['iscsi_name']) + for node in self._state['storage_nodes'].values(): + if ('ctr%s' % node['id']) in iscsi_name: + node['enabled_protocols'].append('iSCSI') + return + + def _validate_pools_exist(self): + LOG.debug('_validate_pools_exist. 
' + 'pools: %s', ' '.join(self.pools)) + for pool in self.pools: + pool_data = self._cmd.get_pool_info(pool) + if not pool_data: + msg = _('Failed getting details for pool %s.') % pool + raise exception.InvalidInput(reason=msg) + return True + + @cinder_utils.trace_method + def check_for_setup_error(self): + """Ensure that the params are set properly.""" + if self._state['system_name'] is None: + exception_msg = _('Unable to determine system name.') + raise exception.VolumeBackendAPIException(data=exception_msg) + if self._state['system_id'] is None: + exception_msg = _('Unable to determine system id.') + raise exception.VolumeBackendAPIException(data=exception_msg) + if len(self._state['storage_nodes']) != 2: + msg = _('do_setup: No configured nodes.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + if self.protocol not in self._state['enabled_protocols']: + raise exception.InvalidInput( + reason=(_('The storage device does not support %(prot)s. ' + 'Please configure the device to support %(prot)s ' + 'or switch to a driver using a different ' + 'protocol.') % {'prot': self.protocol})) + required = ['san_ip', + 'san_ssh_port', + 'san_login', + 'acs5000_volpool_name'] + for param in required: + if not self.configuration.safe_get(param): + raise exception.InvalidInput( + reason=_('%s is not set.') % param) + + if not self.configuration.san_password: + raise exception.InvalidInput( + reason='Password is required for authentication') + + return + + def _run_ssh(self, cmd_list, check_exit_code=True): + cinder_utils.check_ssh_injection(cmd_list) + command = ' '.join(cmd_list) + if not self.sshpool: + try: + self.sshpool = self._set_up_sshpool(self.configuration.san_ip) + except paramiko.SSHException as e: + raise exception.VolumeDriverException(message=e) + ssh_execute = self._ssh_execute( + self.sshpool, command, check_exit_code) + return ssh_execute + + def _set_up_sshpool(self, ip): + port = self.configuration.get('san_ssh_port', 22) + login = 
self.configuration.get('san_login') + password = self.configuration.get('san_password') + timeout = self.configuration.get('ssh_conn_timeout', 30) + min_size = self.configuration.get('ssh_min_pool_conn', 1) + max_size = self.configuration.get('ssh_max_pool_conn', 5) + sshpool = ssh_utils.SSHPool(ip, + port, + timeout, + login, + password=password, + min_size=min_size, + max_size=max_size) + return sshpool + + def _ssh_execute( + self, + sshpool, + command, + check_exit_code=True): + # noinspection PyBroadException + try: + with sshpool.item() as ssh: + try: + return processutils.ssh_execute( + ssh, command, check_exit_code=check_exit_code) + except Exception as e: + LOG.error('Error has occurred: %s', e) + raise processutils.ProcessExecutionError( + exit_code=e.exit_code, + stdout=e.stdout, + stderr=e.stderr, + cmd=e.cmd) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Error running SSH command: %s', command) + + def create_volume(self, volume): + LOG.debug('create_volume, volume %s.', volume['id']) + volume_name = VOLUME_PREFIX + volume['id'][-12:] + pool_name = volume_utils.extract_host(volume['host'], 'pool') + ret = self._cmd.create_volume( + volume_name, + str(volume['size']), + pool_name) + if ret['key'] == 310: + msg = _('Volume: %s with same name ' + 'already exists on the system.') % volume_name + raise exception.VolumeBackendAPIException(data=msg) + elif ret['key'] == 102: + allow_size = 0 + for p in self._stats['pools']: + if p['pool_name'] == pool_name: + allow_size = p['free_capacity_gb'] + break + raise exception.VolumeSizeExceedsLimit(size=int(volume['size']), + limit=allow_size) + elif ret['key'] == 307: + raise exception.VolumeLimitExceeded(allowed=96, + name=volume_name) + elif ret['key'] == 308: + raise exception.VolumeLimitExceeded(allowed=4096, + name=volume_name) + model_update = None + return model_update + + def delete_volume(self, volume): + volume_name = VOLUME_PREFIX + volume['id'][-12:] + 
self._cmd.delete_volume(volume_name) + + def create_snapshot(self, snapshot): + volume_name = VOLUME_PREFIX + snapshot['volume_name'][-12:] + snapshot_name = VOLUME_PREFIX + snapshot['name'][-12:] + ret = self._cmd.create_snapshot(volume_name, snapshot_name) + if ret['key'] == 303: + raise exception.VolumeNotFound(volume_id=volume_name) + elif ret['key'] == 503: + raise exception.SnapshotLimitExceeded(allowed=4096) + elif ret['key'] == 504: + raise exception.SnapshotLimitExceeded(allowed=64) + + def delete_snapshot(self, snapshot): + volume_name = VOLUME_PREFIX + snapshot['volume_name'][-12:] + snapshot_name = VOLUME_PREFIX + snapshot['name'][-12:] + ret = self._cmd.delete_snapshot(volume_name, snapshot_name) + if ret['key'] == 505: + raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) + + def create_volume_from_snapshot(self, volume, snapshot): + snapshot_name = VOLUME_PREFIX + snapshot['name'][-12:] + volume_name = VOLUME_PREFIX + volume['id'][-12:] + source_volume = VOLUME_PREFIX + snapshot['volume_name'][-12:] + pool = volume_utils.extract_host(volume['host'], 'pool') + self._cmd.create_volume(volume_name, + str(volume['size']), + pool, '10') + self._local_clone_copy(source_volume, + volume_name, + 'create_volume_from_snapshot', + snapshot_name) + + def create_cloned_volume(self, tgt_volume, src_volume): + clone_name = VOLUME_PREFIX + tgt_volume['id'][-12:] + volume_name = VOLUME_PREFIX + src_volume['id'][-12:] + tgt_pool = volume_utils.extract_host(tgt_volume['host'], 'pool') + try: + self._cmd.create_volume(clone_name, str( + tgt_volume['size']), tgt_pool, '10') + self._local_clone_copy( + volume_name, clone_name, 'create_cloned_volume') + except exception.VolumeBackendAPIException: + self._cmd.delete_volume(clone_name) + raise exception.VolumeBackendAPIException( + data='create_cloned_volume failed.') + + def extend_volume(self, volume, new_size): + volume_name = VOLUME_PREFIX + volume['id'][-12:] + ret = self._cmd.extend_volume(volume_name, 
int(new_size)) + if ret['key'] == 303: + raise exception.VolumeNotFound(volume_id=volume_name) + elif ret['key'] == 321: + msg = _('Volume capacity shall not be ' + 'less than the current size %sG.') % volume['size'] + raise exception.VolumeBackendAPIException(data=msg) + elif ret['key'] == 102: + pool_name = volume_utils.extract_host(volume['host'], 'pool') + allow_size = 0 + for p in self._stats['pools']: + if p['pool_name'] == pool_name: + allow_size = p['free_capacity_gb'] + break + raise exception.VolumeSizeExceedsLimit(size=int(new_size), + limit=allow_size) + + def migrate_volume(self, ctxt, volume, host): + LOG.debug('enter: migrate_volume id %(id)s, host %(host)s', + {'id': volume['id'], 'host': host['host']}) + pool = volume_utils.extract_host(volume['host'], 'pool') + if 'system_id' not in host['capabilities']: + LOG.error('Target host has no system_id') + return (False, None) + if host['capabilities']['system_id'] != self._state['system_id']: + LOG.info('The target host does not belong to the same ' + 'storage system as the current volume') + return (False, None) + if host['capabilities']['pool_name'] == pool: + LOG.info('The target host belongs to the same storage system ' + 'and pool as the current volume.') + return (True, None) + LOG.info('The target host belongs to the same storage system ' + 'as the current but to a different pool. ' + 'The same storage system will clone volume into the new pool') + volume_name = VOLUME_PREFIX + volume['id'][-12:] + tmp_name = VOLUME_PREFIX + 'tmp' + tmp_name += str(random.randint(0, 999999)).zfill(8) + self._cmd.create_volume(tmp_name, + str(volume['size']), + host['capabilities']['pool_name'], + '10') + self._local_clone_copy( + volume_name, tmp_name, 'migrate_volume') + self._cmd.delete_volume(volume_name) + self._cmd.set_volume_property(tmp_name, + {'type': '"RAID Volume"', + 'new_name': volume_name}) + return (True, None) + + def get_volume_stats(self, refresh=False): + """Get volume stats. 
+ + If we haven't gotten stats yet or 'refresh' is True, + run update the stats first. + """ + if not self._stats or refresh: + self._update_volume_stats() + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + LOG.debug('Updating volume stats, ' + 'pools: \'%(host)s#%(pool)s\'.', + {'host': self.host, + 'pool': ','.join(self.pools)}) + data = {} + data['vendor_name'] = self.VENDOR + data['driver_version'] = self.VERSION + data['storage_protocol'] = self.protocol + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = (backend_name or + self._state['system_name']) + data['pools'] = [self._build_pool_stats(pool) + for pool in self.pools] + + self._stats = data + + def _build_pool_stats(self, pool): + """Build pool status""" + pool_stats = {} + try: + pool_data = self._cmd.get_pool_info(pool) + if pool_data: + total_capacity_gb = float(pool_data['capacity']) / units.Gi + free_capacity_gb = float(pool_data['free_capacity']) / units.Gi + allocated_capacity_gb = float( + pool_data['used_capacity']) / units.Gi + total_volumes = None + if 'total_volumes' in pool_data.keys(): + total_volumes = int(pool_data['total_volumes']) + pool_stats = { + 'pool_name': pool_data['name'], + 'total_capacity_gb': total_capacity_gb, + 'free_capacity_gb': free_capacity_gb, + 'allocated_capacity_gb': allocated_capacity_gb, + 'compression_support': True, + 'reserved_percentage': + self.configuration.reserved_percentage, + 'QoS_support': False, + 'consistencygroup_support': False, + 'multiattach': False, + 'easytier_support': False, + 'total_volumes': total_volumes, + 'system_id': self._state['system_id']} + else: + msg = _('Backend storage pool "%s" not found.') % pool + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + except exception.VolumeBackendAPIException: + msg = _('Failed getting details for pool %s.') % pool + raise exception.VolumeBackendAPIException(data=msg) + + 
return pool_stats + + def _local_clone_copy(self, volume, clone, action=None, snapshot=''): + LOG.debug('enter: copy volume %(vol)s to %(clone)s by %(action)s.', + {'vol': volume, + 'clone': clone, + 'action': action}) + if self._wait_volume_copy(volume, clone, action, 'wait'): + LOG.info('start copy task.') + ret = self._cmd.create_clone(volume, clone) + if ret['key'] != 0: + self._cmd.delete_volume(clone) + if ret['key'] == 306: + raise exception.VolumeBackendAPIException( + data='The source volume must not be larger ' + 'than the target volume in a clone relation. ') + elif ret['key'] == 0: + ret = self._cmd.start_clone(volume, snapshot) + if ret['key'] == 505: + raise exception.SnapshotNotFound(snapshot_id=snapshot) + else: + LOG.error('%(action)s failed.', {'action': action}) + raise exception.VolumeBackendAPIException(data='clone failed!') + + if self._wait_volume_copy(volume, clone, action, 'copy'): + self._cmd.delete_clone(volume, snapshot) + LOG.info('%s successfully.', action) + else: + LOG.error('%(action)s failed.', {'action': action}) + raise exception.VolumeBackendAPIException(data='clone failed!') + LOG.debug('leave: copy volume %(vol)s to %(clone)s by %(action)s. 
', + {'vol': volume, + 'clone': clone, + 'action': action}) + + @coordination.synchronized('acs5000-copy-{volume}-task') + def _wait_volume_copy(self, volume, clone, function=None, action=None): + LOG.debug('_wait_volume_copy, volume %s.', volume) + if volume is None or clone is None: + LOG.error('volume parameter error.') + return False + ret = False + while_exit = False + rescan = 0 + interval = self.configuration.acs5000_copy_interval + wait_status = ( + 'Initiating', + 'Rebuilding', + 'Erasing', + 'Delayed rebuilding') + # All status + # {"Offline", "Online", "Initiating", + # ###"Rebuilding", "Migrating", "Parity chking", + # ###"Cloning", "Rolling back", "Parity chking", + # ###"Replicating", "Erasing", "Moving", "Replacing", + # "Reclaiming", "Delayed rebuilding", "Relocation", "N/A"}; + # All health + # {"Optimal", "Degraded", "Deleted", "Missing", "Failed", + # "Partially optimal", "N/A"} + while True: + rescan += 1 + volume_info = self._cmd.get_volume([volume, clone]) + if len(volume_info) == 2: + for vol in volume_info: + if vol['type'] == 'BACKUP': + if vol['health'] == 'Optimal' and ( + vol['status'] in wait_status): + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times, clone %(clone)s ' + 'need wait,status is %(status)s, ' + 'health is %(health)s, ' + 'process is %(process)s%%. ', + {'function': function, + 'action': action, + 'scan': rescan, + 'clone': vol['name'], + 'status': vol['status'], + 'health': vol['health'], + 'process': vol['r']}) + elif vol['status'] == 'Cloning': + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,volume %(volume)s ' + 'copy process %(process)s%%. ', + {'function': function, + 'action': action, + 'scan': rescan, + 'volume': vol['name'], + 'process': vol['r']}) + elif vol['status'] == 'Queued': + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times, ' + 'volume %(volume)s is in the queue. 
', + {'function': function, + 'action': action, 'scan': rescan, + 'volume': vol['name']}) + elif (vol['type'] == 'RAID Volume' + and vol['status'] == 'Online'): + ret = True + while_exit = True + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,volume %(volume)s ' + 'copy task completed,status is Online. ', + {'function': function, + 'action': action, + 'scan': rescan, + 'volume': vol['name']}) + elif (vol['health'] == 'Optimal' + and (vol['status'] in wait_status)): + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,volume %(volume)s ' + 'need wait, ' + 'status is %(status)s,health is %(health)s, ' + 'process is %(process)s%%. ', + {'function': function, + 'action': action, + 'scan': rescan, + 'volume': vol['name'], + 'status': vol['status'], + 'health': vol['health'], + 'process': vol['r']}) + else: + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,volume %(volume)s ' + 'is not normal, ' + 'status %(status)s,health is %(health)s. ', + {'function': function, + 'action': action, + 'scan': rescan, + 'volume': vol['name'], + 'status': vol['status'], + 'health': vol['health']}) + while_exit = True + break + elif len(volume_info) == 1: + while_exit = True + if volume_info[0]['name'] == volume: + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,clone %(clone)s ' + 'does not exist! ', + {'function': function, + 'action': action, + 'scan': rescan, + 'clone': clone}) + else: + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,volume %(volume)s ' + 'does not exist! ', + {'function': function, + 'action': action, + 'scan': rescan, + 'volume': volume}) + else: + while_exit = True + LOG.info('%(function)s %(action)s task: ' + 'rescan %(scan)s times,volume %(volume)s ' + 'clone %(clone)s does not exist! 
', + {'function': function, + 'action': action, + 'scan': rescan, + 'volume': volume, + 'clone': clone}) + + if while_exit: + break + greenthread.sleep(interval) + return ret diff --git a/cinder/volume/drivers/toyou/acs5000/acs5000_iscsi.py b/cinder/volume/drivers/toyou/acs5000/acs5000_iscsi.py new file mode 100644 index 00000000000..fff3e2a87e6 --- /dev/null +++ b/cinder/volume/drivers/toyou/acs5000/acs5000_iscsi.py @@ -0,0 +1,136 @@ +# Copyright 2020 toyou Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +acs5000 iSCSI driver +""" + +from oslo_config import cfg +from oslo_log import log as logging + +from cinder import exception +from cinder import interface +from cinder import utils +from cinder.volume.drivers.toyou.acs5000 import acs5000_common + +LOG = logging.getLogger(__name__) + +acs5000_iscsi_opts = [ + cfg.IntOpt( + 'acs5000_target', + default=0, + min=0, + max=127, + help='A storage system iSCSI support 0 - 127 targets. ' + 'Devices connected to the SCSI bus are usually ' + 'described by the target ID(also known as the SCSI ID). ' + 'Multiple LUN numbers can be supported under a target. ' + 'A single device is usually identified ' + 'by the target and the LUN.') +] + +CONF = cfg.CONF +CONF.register_opts(acs5000_iscsi_opts) + + +@interface.volumedriver +class Acs5000ISCSIDriver(acs5000_common.Acs5000CommonDriver): + """TOYOU ACS5000 storage iSCSI volume driver. + + .. 
code-block:: none + + Version history: + 1.0.0 - Initial driver + + """ + + VENDOR = 'TOYOU' + VERSION = '1.0.0' + PROTOCOL = 'iSCSI' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = 'TOYOU_ACS5000_CI' + + def __init__(self, *args, **kwargs): + super(Acs5000ISCSIDriver, self).__init__(*args, **kwargs) + self.protocol = self.PROTOCOL + self.configuration.append_config_values( + acs5000_iscsi_opts) + + @staticmethod + def get_driver_options(): + return acs5000_common.acs5000c_opts + acs5000_iscsi_opts + + def validate_connector(self, connector): + """Check connector for at least one enabled iSCSI protocol.""" + if 'initiator' not in connector: + LOG.error('The connector does not ' + 'contain the required information.') + raise exception.InvalidConnectorException( + missing='initiator') + + @utils.synchronized('Acs5000A-host', external=True) + def initialize_connection(self, volume, connector): + LOG.debug('initialize_connection: volume %(vol)s with connector ' + '%(conn)s', {'vol': volume['id'], 'conn': connector}) + volume_name = acs5000_common.VOLUME_PREFIX + volume['name'][-12:] + target = self.configuration.acs5000_target + ret = self._cmd.create_lun_map(volume_name, + 'WITH_ISCSI', + connector['initiator'], + str(target)) + if ret['key'] == 303: + raise exception.VolumeNotFound(volume_id=volume_name) + elif ret['key'] == 402: + raise exception.ISCSITargetAttachFailed(volume_id=volume_name) + else: + volume_attributes = self._cmd.get_ip_connect(str(target)) + lun_info = ret['arr'] + lun = [] + for i in range(len(volume_attributes['portal'])): + lun.append(int(lun_info['info'])) + properties = {} + properties['target_discovered'] = False + properties['target_iqns'] = volume_attributes['iscsi_name'] + properties['target_portals'] = volume_attributes['portal'] + properties['target_luns'] = lun + properties['volume_id'] = volume['id'] + properties['auth_method'] = '' + properties['auth_username'] = '' + properties['auth_password'] = '' + 
properties['discovery_auth_method'] = '' + properties['discovery_auth_username'] = '' + properties['discovery_auth_password'] = '' + return {'driver_volume_type': 'iscsi', 'data': properties} + + @utils.synchronized('Acs5000A-host', external=True) + def terminate_connection(self, volume, connector, **kwargs): + LOG.debug('terminate_connection: volume %(vol)s with connector ' + '%(conn)s', {'vol': volume['id'], 'conn': connector}) + info = {'driver_volume_type': 'iscsi', 'data': {}} + name = acs5000_common.VOLUME_PREFIX + volume['name'][-12:] + target = self.configuration.acs5000_target + # -1 means all lun maps + initiator = '-1' + if connector and connector['initiator']: + initiator = connector['initiator'] + self._cmd.delete_lun_map(name, + 'WITH_ISCSI', + initiator, + str(target)) + LOG.debug('leave: terminate_connection: volume %(vol)s with ' + 'connector %(conn)s', {'vol': volume['id'], + 'conn': connector}) + return info diff --git a/doc/source/configuration/block-storage/drivers/toyou-acs5000-driver.rst b/doc/source/configuration/block-storage/drivers/toyou-acs5000-driver.rst new file mode 100644 index 00000000000..303b6ba97c3 --- /dev/null +++ b/doc/source/configuration/block-storage/drivers/toyou-acs5000-driver.rst @@ -0,0 +1,74 @@ +========================== +TOYOU ACS5000 iSCSI driver +========================== + +TOYOU ACS5000 series volume driver provides OpenStack Compute instances +with access to TOYOU ACS5000 series storage systems. + +TOYOU ACS5000 storage can be used with iSCSI connection. + +This documentation explains how to configure and connect the block storage +nodes to TOYOU ACS5000 series storage. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options supported by the +TOYOU ACS5000 iSCSI driver. + +.. 
config-table:: + :config-target: TOYOU ACS5000 + + cinder.volume.drivers.toyou.acs5000.acs5000_iscsi + cinder.volume.drivers.toyou.acs5000.acs5000_common + +Supported operations +~~~~~~~~~~~~~~~~~~~~ + +- Create, list, delete, attach (map), and detach (unmap) volumes. +- Create, list and delete volume snapshots. +- Create a volume from a snapshot. +- Copy an image to a volume. +- Copy a volume to an image. +- Clone a volume. +- Extend a volume. +- Migrate a volume. + +Configure TOYOU ACS5000 iSCSI backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section details the steps required to configure the TOYOU ACS5000 +storage cinder driver. + +#. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` + section, set the enabled_backends parameter. + + .. code-block:: ini + + [DEFAULT] + enabled_backends = ACS5000-1 + + +#. Add a backend group section for the backend group specified + in the enabled_backends parameter. + +#. In the newly created backend group section, set the + following configuration options: + + .. 
code-block:: ini
+
+ [ACS5000-1]
+ # The driver path
+ volume_driver = cinder.volume.drivers.toyou.acs5000.acs5000_iscsi.Acs5000ISCSIDriver
+ # Management IP of TOYOU ACS5000 storage array
+ san_ip = 10.0.0.10
+ # Management username of TOYOU ACS5000 storage array
+ san_login = cliuser
+ # Management password of TOYOU ACS5000 storage array
+ san_password = clipassword
+ # The pool used to allocate volumes
+ acs5000_volpool_name = pool01
+ # LUN map target; supported target IDs are 0 - 127
+ acs5000_target = 0
+ # Backend name
+ volume_backend_name = ACS5000
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index 9d27d399cac..fe9891e6990 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -168,6 +168,9 @@ title=StorPool Storage Driver (storpool)
[driver.synology]
title=Synology Storage Driver (iSCSI)
+[driver.toyou]
+title=TOYOU ACS5000 Storage Driver (iSCSI)
+
[driver.vrtsaccess]
title=Veritas Access iSCSI Driver (iSCSI)
@@ -250,6 +253,7 @@ driver.sandstone=complete
driver.seagate=complete
driver.storpool=complete
driver.synology=complete
+driver.toyou=complete
driver.vrtsaccess=missing
driver.vrtscnfs=missing
driver.vzstorage=missing
@@ -314,6 +318,7 @@ driver.sandstone=complete
driver.seagate=complete
driver.storpool=complete
driver.synology=complete
+driver.toyou=missing
driver.vrtsaccess=complete
driver.vrtscnfs=complete
driver.vzstorage=complete
@@ -378,6 +383,7 @@ driver.sandstone=missing
driver.seagate=missing
driver.storpool=missing
driver.synology=missing
+driver.toyou=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
driver.vzstorage=missing
@@ -445,6 +451,7 @@ driver.sandstone=complete
driver.seagate=missing
driver.storpool=missing
driver.synology=missing
+driver.toyou=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
driver.vzstorage=missing
@@ -511,6 +518,7 @@ driver.sandstone=complete
driver.seagate=missing
driver.storpool=complete
driver.synology=missing
+driver.toyou=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing @@ -578,6 +586,7 @@ driver.sandstone=missing driver.seagate=missing driver.storpool=missing driver.synology=missing +driver.toyou=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing @@ -644,6 +653,7 @@ driver.sandstone=complete driver.seagate=missing driver.storpool=complete driver.synology=missing +driver.toyou=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing @@ -711,6 +721,7 @@ driver.sandstone=missing driver.seagate=missing driver.storpool=complete driver.synology=missing +driver.toyou=complete driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing @@ -778,6 +789,7 @@ driver.sandstone=complete driver.seagate=complete driver.storpool=complete driver.synology=missing +driver.toyou=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing @@ -842,6 +854,7 @@ driver.sandstone=complete driver.seagate=missing driver.storpool=missing driver.synology=missing +driver.toyou=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing @@ -910,6 +923,7 @@ driver.sandstone=complete driver.seagate=missing driver.storpool=missing driver.synology=missing +driver.toyou=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing diff --git a/releasenotes/notes/bp-toyou-acs5000-driver-16449ca18280def3.yaml b/releasenotes/notes/bp-toyou-acs5000-driver-16449ca18280def3.yaml new file mode 100644 index 00000000000..c8a3bf75e58 --- /dev/null +++ b/releasenotes/notes/bp-toyou-acs5000-driver-16449ca18280def3.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + New Cinder volume driver for TOYOU ACS5000. + The new driver supports iSCSI.