diff --git a/cinder/opts.py b/cinder/opts.py index 4a2ff85bdfd..6a4e57ffde0 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -113,6 +113,8 @@ from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \ from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi from cinder.volume.drivers import infinidat as cinder_volume_drivers_infinidat +from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli as \ + cinder_volume_drivers_infortrend_raidcmd_cli_commoncli from cinder.volume.drivers.inspur.as13000 import as13000_driver as \ cinder_volume_drivers_inspur_as13000_as13000driver from cinder.volume.drivers.inspur.instorage import instorage_common as \ @@ -252,6 +254,8 @@ def list_opts(): cinder_volume_driver.backup_opts, cinder_volume_driver.image_opts, cinder_volume_drivers_fusionstorage_dsware.volume_opts, + cinder_volume_drivers_infortrend_raidcmd_cli_commoncli. + infortrend_opts, cinder_volume_drivers_inspur_as13000_as13000driver. inspur_as13000_opts, cinder_volume_drivers_inspur_instorage_instoragecommon. diff --git a/cinder/tests/unit/volume/drivers/infortrend/__init__.py b/cinder/tests/unit/volume/drivers/infortrend/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py new file mode 100644 index 00000000000..2c44b1f80f7 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py @@ -0,0 +1,2670 @@ +# Copyright (c) 2015 Infortrend Technology, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from cinder import test +from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli + + +class InfortrendCLITestData(object): + + """CLI Test Data.""" + + # Infortrend entry + fake_lv_id = ['5DE94FF775D81C30', '1234567890', 'HK3345678'] + + fake_partition_id = ['6A41315B0EDC8EB7', '51B4283E4E159173', + '987654321', '123456789', + '2667FE351FC505AE', '53F3E98141A2E871'] + + fake_pair_id = ['55D790F8350B036B', '095A184B0ED2DB10'] + + fake_snapshot_id = ['2C7A8D211F3B1E36', '60135EE53C14D5EB', + '4884610D11FD3335', '5C44BE0A776A2804'] + + fake_snapshot_name = ['9e8b27e9-568c-44ca-bd7c-2c7af96ab248', + '35e8ba6e-3372-4e67-8464-2b68758f3aeb', + 'f69696ea-26fc-4f4c-97335-e3ce33ee563', + 'cinder-unmanaged-f31d8326-c2d8-4668-'] + + fake_data_port_ip = ['172.27.0.1', '172.27.0.2', + '172.27.0.3', '172.27.0.4', + '172.27.0.5', '172.27.0.6'] + + fake_model = ['DS S12F-G2852-6'] + + fake_manage_port_ip = ['172.27.0.10'] + + fake_system_id = ['DEEC'] + + fake_host_ip = ['172.27.0.2'] + + fake_target_wwnns = ['100123D02300DEEC', '100123D02310DEEC'] + + fake_target_wwpns = ['110123D02300DEEC', '120123D02300DEEC', + '110123D02310DEEC', '120123D02310DEEC'] + + fake_initiator_wwnns = ['2234567890123456', '2234567890543216'] + + fake_initiator_wwpns = ['1234567890123456', '1234567890543216'] + + fake_initiator_iqn = ['iqn.1991-05.com.infortrend:pc123', + 'iqn.1991-05.com.infortrend:pc456'] + + fake_lun_map = [0, 1, 2] + + # cinder entry + test_provider_location = [( + 'system_id^%s@partition_id^%s') % ( + int(fake_system_id[0], 16), fake_partition_id[0]), ( + 'system_id^%s@partition_id^%s') % ( + int(fake_system_id[0], 16), fake_partition_id[1]) + ] + + test_volume = { + 'id': '5aa119a8-d25b-45a7-8d1b-88e127885635', + 'size': 1, + 'name': 'Part-1', + 'host': 'infortrend-server1@backend_1#LV-1', + 'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': None, + 'display_description': 'Part-1', + 'volume_type_id': None, + 'status': 'available', + 'provider_location': test_provider_location[0], + 'volume_attachment': [], + } + + test_volume_1 = { + 'id': '5aa119a8-d25b-45a7-8d1b-88e127885634', + 'size': 1, + 'name': 'Part-1', + 'host': 'infortrend-server1@backend_1#LV-1', + 'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': None, + 'display_description': 'Part-1', + 'volume_type_id': None, + 'status': 'in-use', + 'provider_location': test_provider_location[1], + 'volume_attachment': [], + } + + test_dst_volume = { + 'id': '6bb119a8-d25b-45a7-8d1b-88e127885666', + 'size': 1, + 'name': 'Part-1-Copy', + 'host': 'infortrend-server1@backend_1#LV-1', + 'name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': None, + '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', + 'display_description': 'Part-1-Copy', + 'volume_type_id': None, + 'provider_location': '', + 'volume_attachment': [], + } + + test_ref_volume = { + 'source-id': fake_partition_id[0], + 'size': 1, + } + + test_ref_volume_with_id = { + 'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666', + 'size': 1, + } + + test_ref_volume_with_name = { + 'source-name': 'import_into_openstack', + 'size': 1, + } + + test_snapshot = { + 'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d', + 'volume_id': test_volume['id'], + 'volume_name': test_volume['name'], + 'volume_size': 2, + 'project_id': 'project', + 'display_name': None, + 
'display_description': 'SI-1', + 'volume_type_id': None, + 'provider_location': fake_snapshot_id[0], + } + test_snapshot_without_provider_location = { + 'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d', + 'volume_id': test_volume['id'], + 'volume_name': test_volume['name'], + 'volume_size': 2, + 'project_id': 'project', + 'display_name': None, + 'display_description': 'SI-1', + 'volume_type_id': None, + } + + test_iqn = [( + 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( + int(fake_system_id[0], 16), 1, 0, 1), ( + 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( + int(fake_system_id[0], 16), 1, 0, 1), ( + 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( + int(fake_system_id[0], 16), 2, 0, 1), + ] + + test_iscsi_properties = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'target_portal': '%s:3260' % fake_data_port_ip[2], + 'target_iqn': test_iqn[0], + 'target_lun': fake_lun_map[0], + 'volume_id': test_volume['id'], + }, + } + + test_iscsi_properties_with_mcs = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'target_portal': '%s:3260' % fake_data_port_ip[4], + 'target_iqn': test_iqn[2], + 'target_lun': fake_lun_map[0], + 'volume_id': test_volume['id'], + }, + } + + test_iscsi_properties_with_mcs_1 = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'target_portal': '%s:3260' % fake_data_port_ip[4], + 'target_iqn': test_iqn[2], + 'target_lun': fake_lun_map[1], + 'volume_id': test_volume_1['id'], + }, + } + + test_iqn_empty_map = [( + 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( + int(fake_system_id[0], 16), 0, 0, 1), + ] + + test_iscsi_properties_empty_map = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'target_portal': '%s:3260' % fake_data_port_ip[0], + 'target_iqn': test_iqn_empty_map[0], + 'target_lun': fake_lun_map[0], + 'volume_id': test_volume['id'], + }, + } + + test_initiator_target_map = { + fake_initiator_wwpns[0]: fake_target_wwpns[0:2], + fake_initiator_wwpns[1]: fake_target_wwpns[0:2], + } + + test_fc_properties = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'target_lun': fake_lun_map[0], + 'target_wwn': fake_target_wwpns[0:2], + 'initiator_target_map': test_initiator_target_map, + }, + } + + test_initiator_target_map_specific_channel = { + fake_initiator_wwpns[0]: [fake_target_wwpns[1]], + fake_initiator_wwpns[1]: [fake_target_wwpns[1]], + } + + test_fc_properties_with_specific_channel = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'target_lun': fake_lun_map[0], + 'target_wwn': [fake_target_wwpns[1]], + 'initiator_target_map': test_initiator_target_map_specific_channel, + }, + } + + test_target_wwpns_map_multipath_r_model = [ + fake_target_wwpns[0], + fake_target_wwpns[2], + fake_target_wwpns[1], + fake_target_wwpns[3], + ] + + test_initiator_target_map_multipath_r_model = { + fake_initiator_wwpns[0]: test_target_wwpns_map_multipath_r_model[:], + fake_initiator_wwpns[1]: test_target_wwpns_map_multipath_r_model[:], + } + + test_fc_properties_multipath_r_model = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'target_lun': fake_lun_map[0], + 'target_wwn': test_target_wwpns_map_multipath_r_model[:], + 'initiator_target_map': + test_initiator_target_map_multipath_r_model, + }, + } + + test_initiator_target_map_zoning = { + fake_initiator_wwpns[0].lower(): + [x.lower() for x in fake_target_wwpns[0:2]], + 
fake_initiator_wwpns[1].lower(): + [x.lower() for x in fake_target_wwpns[0:2]], + } + + test_fc_properties_zoning = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'target_lun': fake_lun_map[0], + 'target_wwn': [x.lower() for x in fake_target_wwpns[0:2]], + 'initiator_target_map': test_initiator_target_map_zoning, + }, + } + + test_initiator_target_map_zoning_r_model = { + fake_initiator_wwpns[0].lower(): + [x.lower() for x in fake_target_wwpns[1:3]], + fake_initiator_wwpns[1].lower(): + [x.lower() for x in fake_target_wwpns[1:3]], + } + + test_fc_properties_zoning_r_model = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'target_lun': fake_lun_map[0], + 'target_wwn': [x.lower() for x in fake_target_wwpns[1:3]], + 'initiator_target_map': test_initiator_target_map_zoning_r_model, + }, + } + + test_fc_terminate_conn_info = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'initiator_target_map': test_initiator_target_map_zoning, + }, + } + + test_connector_iscsi = { + 'ip': fake_host_ip[0], + 'initiator': fake_initiator_iqn[0], + 'host': 'infortrend-server1@backend_1', + } + + test_connector_iscsi_1 = { + 'ip': fake_host_ip[0], + 'initiator': fake_initiator_iqn[1], + 'host': 'infortrend-server1@backend_1', + } + + test_connector_fc = { + 'wwpns': fake_initiator_wwpns, + 'wwnns': fake_initiator_wwnns, + 'host': 'infortrend-server1@backend_1', + } + + fake_pool = { + 'pool_name': 'LV-2', + 'pool_id': fake_lv_id[1], + 'total_capacity_gb': 1000, + 'free_capacity_gb': 1000, + 'reserved_percentage': 0, + 'QoS_support': False, + 'thin_provisioning_support': False, + } + + test_pools_full = [{ + 'pool_name': 'LV-1', + 'pool_id': fake_lv_id[0], + 'location_info': 'Infortrend:' + fake_system_id[0], + 'total_capacity_gb': round(857982.0 / 1024, 2), + 'free_capacity_gb': round(841978.0 / 1024, 2), + 'reserved_percentage': 0, + 'QoS_support': False, + 'thick_provisioning_support': True, + 'thin_provisioning_support': False, + }] + + test_volume_states_full = { + 'volume_backend_name': 'infortrend_backend_1', + 'vendor_name': 'Infortrend', + 'driver_version': '99.99', + 'storage_protocol': 'iSCSI', + 'model_type': 'R', + 'status': 'Connected', + 'system_id': fake_system_id[0], + 'pools': test_pools_full, + } + + test_pools_thin = [{ + 'pool_name': 'LV-1', + 'pool_id': fake_lv_id[0], + 'location_info': 'Infortrend:' + fake_system_id[0], + 'total_capacity_gb': round(857982.0 / 1024, 2), + 'free_capacity_gb': round(841978.0 / 1024, 2), + 'reserved_percentage': 0, + 'QoS_support': False, + 'thick_provisioning_support': True, + 'thin_provisioning_support': True, + 'provisioned_capacity_gb': + round((40000) / 1024, 2), + 'max_over_subscription_ratio': 20.0, + }] + + test_volume_states_thin = { + 'volume_backend_name': 'infortrend_backend_1', + 'vendor_name': 'Infortrend', + 'driver_version': '99.99', + 'storage_protocol': 'iSCSI', + 'model_type': 'R', + 'status': 'Connected', + 'system_id': fake_system_id[0], + 'pools': test_pools_thin, + } + + test_host = { + 'host': 'infortrend-server1@backend_1', + 'capabilities': test_volume_states_thin, + } + + test_migrate_volume_states = { + 'volume_backend_name': 'infortrend_backend_1', + 'vendor_name': 'Infortrend', + 'driver_version': '99.99', + 'storage_protocol': 'iSCSI', + 'pool_name': 'LV-1', + 'pool_id': fake_lv_id[1], + 'location_info': 'Infortrend:' + fake_system_id[0], + 'total_capacity_gb': round(857982.0 / 1024, 2), + 'free_capacity_gb': round(841978.0 / 1024, 2), + 
'reserved_percentage': 0, + 'QoS_support': False, + } + + test_migrate_host = { + 'host': 'infortrend-server1@backend_1#LV-2', + 'capabilities': test_migrate_volume_states, + } + + test_migrate_volume_states_2 = { + 'volume_backend_name': 'infortrend_backend_1', + 'vendor_name': 'Infortrend', + 'driver_version': '99.99', + 'storage_protocol': 'iSCSI', + 'pool_name': 'LV-1', + 'pool_id': fake_lv_id[1], + 'location_info': 'Infortrend:' + fake_system_id[0], + 'total_capacity_gb': round(857982.0 / 1024, 2), + 'free_capacity_gb': round(841978.0 / 1024, 2), + 'reserved_percentage': 0, + 'QoS_support': False, + } + + test_migrate_host_2 = { + 'host': 'infortrend-server1@backend_1#LV-1', + 'capabilities': test_migrate_volume_states_2, + } + + fake_host = { + 'host': 'infortrend-server1@backend_1', + 'capabilities': {}, + } + + fake_volume_id = [test_volume['id'], test_dst_volume['id']] + + fake_lookup_map = { + '12345678': { + 'initiator_port_wwn_list': + [x.lower() for x in fake_initiator_wwpns], + 'target_port_wwn_list': + [x.lower() for x in fake_target_wwpns[0:2]], + }, + } + + fake_lookup_map_r_model = { + '12345678': { + 'initiator_port_wwn_list': + [x.lower() for x in fake_initiator_wwpns[:]], + 'target_port_wwn_list': + [x.lower() for x in fake_target_wwpns[1:3]], + }, + } + + test_new_type = { + 'name': 'type0', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': {'infortrend:provisioning': 'thin'}, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + + test_diff = {'extra_specs': {'infortrend:provisioning': ('full', 'thin')}} + + def get_fake_cli_failed(self): + return """ +ift cli command +CLI: No selected device +Return: 0x000c + +RAIDCmd:> +""" + + def get_fake_cli_failed_with_network(self): + return """ +ift cli command +CLI: Not exist: There is no such partition: 3345678 +Return: 0x000b + +RAIDCmd:> +""" + + def get_fake_cli_succeed(self): + return """ +ift cli command +CLI: Successful: 0 mapping(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + + def get_test_show_empty_list(self): + return (0, []) + + def get_test_show_snapshot(self, partition_id=None, snapshot_id=None): + if partition_id and snapshot_id: + return (0, [{ + 'Map': 'No', + 'Partition-ID': partition_id, + 'SI-ID': snapshot_id, + 'Name': '---', + 'Activated-time': 'Thu, Jan 09 01:33:11 2020', + 'Index': '1', + }]) + else: + return (0, [{ + 'Map': 'No', + 'Partition-ID': self.fake_partition_id[0], + 'SI-ID': self.fake_snapshot_id[0], + 'Name': '---', + 'Activated-time': 'Thu, Jan 09 01:33:11 2020', + 'Index': '1', + }, { + 'Map': 'No', + 'Partition-ID': self.fake_partition_id[0], + 'SI-ID': self.fake_snapshot_id[1], + 'Name': '---', + 'Activated-time': 'Thu, Jan 09 01:35:50 2020', + 'Index': '2', + }]) + + def get_test_show_snapshot_named(self): + return (0, [{ + 'Map': 'No', + 'Partition-ID': self.fake_partition_id[0], + 'SI-ID': self.fake_snapshot_id[0], + 'Name': self.fake_snapshot_name[0], + 'Activated-time': 'Thu, Jan 09 01:33:11 2020', + 'Index': '1', + }, { + 'Map': 'No', + 'Partition-ID': self.fake_partition_id[1], + 'SI-ID': self.fake_snapshot_id[1], + 'Name': self.fake_snapshot_name[1], + 'Activated-time': 'Thu, Jan 09 01:35:50 2020', + 'Index': '1', + }]) + + def get_fake_show_snapshot(self): + msg = r""" +show si +\/\/\/- +\ +/ +- + +\ +/ +- +\/-\/- Index SI-ID Name Partition-ID Map Activated-time +--------------------------------------------------------------------------------- + 1 %s --- %s No Thu, Jan 09 01:33:11 2020 + 2 %s --- %s No Thu, Jan 09 01:35:50 2020 + +CLI: Successful: 2 snapshot image(s) 
shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_snapshot_id[0], + self.fake_partition_id[0], + self.fake_snapshot_id[1], + self.fake_partition_id[0]) + + def get_test_show_snapshot_detail_filled_block(self): + return (0, [{ + 'Mapped': 'Yes', + 'Created-time': 'Wed, Jun 10 10:57:16 2015', + 'ID': self.fake_snapshot_id[0], + 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', + 'Description': '---', + 'Total-filled-block': '1', + 'LV-ID': self.fake_lv_id[0], + 'Activation-schedule-time': 'Not Actived', + 'Mapping': 'CH:0/ID:0/LUN:1', + 'Index': '1', + 'Used': '0', + 'Name': '---', + 'Valid-filled-block': '0', + 'Partition-ID': self.fake_partition_id[0], + }]) + + def get_test_show_snapshot_detail(self): + return (0, [{ + 'Mapped': 'Yes', + 'Created-time': 'Wed, Jun 10 10:57:16 2015', + 'ID': self.fake_snapshot_id[0], + 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', + 'Description': '---', + 'Total-filled-block': '0', + 'LV-ID': self.fake_lv_id[0], + 'Activation-schedule-time': 'Not Actived', + 'Mapping': 'CH:0/ID:0/LUN:1', + 'Index': '1', + 'Used': '0', + 'Name': '---', + 'Valid-filled-block': '0', + 'Partition-ID': self.fake_partition_id[0], + }]) + + def get_test_show_snapshot_get_manage(self): + """Show 4 si for api `list si`: 1.Mapped 2.Managed 3.Free 4.WrongLV""" + + return (0, [{ + 'ID': self.fake_snapshot_id[0], + 'Index': '1', + 'Name': self.fake_snapshot_name[0], + 'Partition-ID': self.fake_partition_id[0], + 'LV-ID': self.fake_lv_id[0], + 'Created-time': 'Fri, Dec 23 07:54:33 2016', + 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', + 'Activated-time': 'Fri, Dec 23 08:29:41 2016', + 'Activation-schedule-time': 'Not Actived', + 'Used': '0', + 'Valid-filled-block': '0', + 'Total-filled-block': '0', + 'Description': '---', + 'Mapped': 'No', + 'Mapping': '---', + 'Backup-to-Cloud': 'false', + 'Status': 'OK', + 'Progress': '---', + }, { + 'ID': self.fake_snapshot_id[1], + 'Index': '2', + 'Name': self.fake_snapshot_name[1], + 'Partition-ID': self.fake_partition_id[1], + 'LV-ID': self.fake_lv_id[0], + 'Created-time': 'Fri, Dec 23 07:54:33 2016', + 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', + 'Activated-time': 'Fri, Dec 23 08:29:41 2016', + 'Activation-schedule-time': 'Not Actived', + 'Used': '0', + 'Valid-filled-block': '0', + 'Total-filled-block': '0', + 'Description': '---', + 'Mapped': 'No', + 'Mapping': '---', + 'Backup-to-Cloud': 'false', + 'Status': 'OK', + 'Progress': '---' + }, { + 'ID': self.fake_snapshot_id[2], + 'Index': '1', + 'Name': self.fake_snapshot_name[2], + 'Partition-ID': self.fake_partition_id[2], + 'LV-ID': self.fake_lv_id[1], + 'Created-time': 'Fri, Dec 23 07:54:33 2016', + 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', + 'Activated-time': 'Fri, Dec 23 08:29:41 2016', + 'Activation-schedule-time': 'Not Actived', + 'Used': '0', + 'Valid-filled-block': '0', + 'Total-filled-block': '0', + 'Description': '---', + 'Mapped': 'No', + 'Mapping': '---', + 'Backup-to-Cloud': 'false', + 'Status': 'OK', + 'Progress': '---', + }, { + 'ID': self.fake_snapshot_id[3], + 'Index': '1', + 'Name': 'test-get-snapshot-list', + # Part ID from get_test_show_partition_detail() + 'Partition-ID': '123123123123', + 'LV-ID': '987654321', + 'Created-time': 'Fri, Dec 23 07:54:33 2016', + 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', + 'Activated-time': 'Fri, Dec 23 08:29:41 2016', + 'Activation-schedule-time': 'Not Actived', + 'Used': '0', + 'Valid-filled-block': '0', + 'Total-filled-block': '0', + 'Description': '---', + 'Mapped': 
'No', + 'Mapping': '---', + 'Backup-to-Cloud': 'false', + 'Status': 'OK', + 'Progress': '---' + }]) + + def get_fake_show_snapshot_detail(self): + msg = """ +show si -l + ID: %s + Index: 1 + Name: --- + Partition-ID: %s + LV-ID: %s + Created-time: Wed, Jun 10 10:57:16 2015 + Last-modification-time: Wed, Jun 10 10:57:16 2015 + Activation-schedule-time: Not Actived + Used: 0 + Valid-filled-block: 0 + Total-filled-block: 0 + Description: --- + Mapped: Yes + Mapping: CH:0/ID:0/LUN:1 + +CLI: Successful: 1 snapshot image(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_snapshot_id[0], + self.fake_partition_id[0], + self.fake_lv_id[0]) + + def get_test_show_net(self): + return (0, [{ + 'Slot': 'slotA', + 'MAC': '10D02380DEEC', + 'ID': '1', + 'IPv4': self.fake_data_port_ip[0], + 'Mode': 'Disabled', + 'IPv6': '---', + }, { + 'Slot': 'slotB', + 'MAC': '10D02390DEEC', + 'ID': '1', + 'IPv4': self.fake_data_port_ip[1], + 'Mode': 'Disabled', + 'IPv6': '---', + }, { + 'Slot': 'slotA', + 'MAC': '10D02340DEEC', + 'ID': '2', + 'IPv4': self.fake_data_port_ip[2], + 'Mode': 'Disabled', + 'IPv6': '---', + }, { + 'Slot': 'slotB', + 'MAC': '10D02350DEEC', + 'ID': '2', + 'IPv4': self.fake_data_port_ip[3], + 'Mode': 'Disabled', + 'IPv6': '---', + }, { + 'Slot': 'slotA', + 'MAC': '10D02310DEEC', + 'ID': '4', + 'IPv4': self.fake_data_port_ip[4], + 'Mode': 'Disabled', + 'IPv6': '---', + }, { + 'Slot': 'slotB', + 'MAC': '10D02320DEEC', + 'ID': '4', + 'IPv4': self.fake_data_port_ip[5], + 'Mode': 'Disabled', + 'IPv6': '---', + }, { + 'Slot': '---', + 'MAC': '10D023077124', + 'ID': '32', + 'IPv4': '172.27.1.1', + 'Mode': 'Disabled', + 'IPv6': '---', + }]) + + def get_fake_show_net(self): + msg = """ +show net + ID MAC Mode IPv4 Mode IPv6 Slot +--------------------------------------------------------------- + 1 10D02380DEEC DHCP %s Disabled --- slotA + 1 10D02390DEEC DHCP %s Disabled --- slotB + 2 10D02340DEEC DHCP %s Disabled --- slotA + 2 10D02350DEEC DHCP %s Disabled --- slotB + 4 10D02310DEEC DHCP %s Disabled --- slotA + 4 10D02320DEEC DHCP %s Disabled --- slotB + 32 10D023077124 DHCP 172.27.1.1 Disabled --- --- + +CLI: Successful: 2 record(s) found +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_data_port_ip[0], self.fake_data_port_ip[1], + self.fake_data_port_ip[2], self.fake_data_port_ip[3], + self.fake_data_port_ip[4], self.fake_data_port_ip[5]) + + def get_test_show_net_detail(self): + return (0, [{ + 'Slot': 'slotA', + 'IPv4-mode': 'DHCP', + 'ID': '1', + 'IPv6-address': '---', + 'Net-mask': '---', + 'IPv4-address': '---', + 'Route': '---', + 'Gateway': '---', + 'IPv6-mode': 'Disabled', + 'MAC': '00D023877124', + 'Prefix-length': '---', + }, { + 'Slot': '---', + 'IPv4-mode': 'DHCP', + 'ID': '32', + 'IPv6-address': '---', + 'Net-mask': '255.255.240.0', + 'IPv4-address': '172.27.112.245', + 'Route': '---', + 'Gateway': '172.27.127.254', + 'IPv6-mode': 'Disabled', + 'MAC': '00D023077124', + 'Prefix-length': '---', + }]) + + def get_fake_show_net_detail(self): + msg = """ +show net -l + ID: 1 + MAC: 00D023877124 + IPv4-mode: DHCP + IPv4-address: --- + Net-mask: --- + Gateway: --- + IPv6-mode: Disabled + IPv6-address: --- + Prefix-length: --- + Route: --- + Slot: slotA + + ID: 32 + MAC: 00D023077124 + IPv4-mode: DHCP + IPv4-address: 172.27.112.245 + Net-mask: 255.255.240.0 + Gateway: 172.27.127.254 + IPv6-mode: Disabled + IPv6-address: --- + Prefix-length: --- + Route: --- + Slot: --- + +CLI: Successful: 3 record(s) found +Return: 0x0000 + +RAIDCmd:> +""" + return msg + + def 
get_test_show_partition(self, volume_id=None, pool_id=None): + result = [{ + 'ID': self.fake_partition_id[0], + 'Used': '20000', + 'Name': self.fake_volume_id[0], + 'Size': '20000', + 'Min-reserve': '20000', + 'LV-ID': self.fake_lv_id[0], + }, { + 'ID': self.fake_partition_id[1], + 'Used': '20000', + 'Name': self.fake_volume_id[1], + 'Size': '20000', + 'Min-reserve': '20000', + 'LV-ID': self.fake_lv_id[0], + }] + if volume_id and pool_id: + result.append({ + 'ID': self.fake_partition_id[2], + 'Used': '20000', + 'Name': volume_id, + 'Size': '20000', + 'Min-reserve': '20000', + 'LV-ID': pool_id, + }) + return (0, result) + + def get_fake_show_partition(self): + msg = """ +show part + ID Name LV-ID Size Used Min-reserve +--------------------------------------------------- + %s %s %s 20000 20000 20000 + %s %s %s 20000 20000 20000 + +CLI: Successful: 3 partition(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_partition_id[0], + self.fake_volume_id[0], + self.fake_lv_id[0], + self.fake_partition_id[1], + self.fake_volume_id[1], + self.fake_lv_id[0]) + + def get_test_show_partition_detail_for_map( + self, partition_id, mapped='true'): + result = [{ + 'LV-ID': self.fake_lv_id[0], + 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1', + 'Used': '20000', + 'Size': '20000', + 'ID': partition_id, + 'Progress': '---', + 'Min-reserve': '20000', + 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', + 'Valid-filled-block': '100', + 'Name': self.fake_volume_id[0], + 'Mapped': mapped, + 'Total-filled-block': '100', + 'Creation-time': 'Wed, Jan 08 20:23:23 2020', + }] + return (0, result) + + def get_test_show_partition_detail(self, volume_id=None, pool_id=None): + result = [{ + 'LV-ID': self.fake_lv_id[0], + 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0', + 'Used': '20000', + 'Size': '20000', + 'ID': self.fake_partition_id[0], + 'Progress': '---', + 'Min-reserve': '20000', + 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', + 'Valid-filled-block': '100', + 'Name': self.fake_volume_id[0], + 'Mapped': 'true', + 'Total-filled-block': '100', + 'Creation-time': 'Wed, Jan 08 20:23:23 2020', + }, { + 'LV-ID': self.fake_lv_id[0], + 'Mapping': '---', + 'Used': '20000', + 'Size': '20000', + 'ID': self.fake_partition_id[1], + 'Progress': '---', + 'Min-reserve': '20000', + 'Last-modification-time': 'Sat, Jan 11 22:18:40 2020', + 'Valid-filled-block': '100', + 'Name': self.fake_volume_id[1], + 'Mapped': 'false', + 'Total-filled-block': '100', + 'Creation-time': 'Sat, Jan 11 22:18:40 2020', + }] + if volume_id and pool_id: + result.extend([{ + 'LV-ID': pool_id, + 'Mapping': '---', + 'Used': '20000', + 'Size': '20000', + 'ID': self.fake_partition_id[2], + 'Progress': '---', + 'Min-reserve': '20000', + 'Last-modification-time': 'Sat, Jan 15 22:18:40 2020', + 'Valid-filled-block': '100', + 'Name': volume_id, + 'Mapped': 'false', + 'Total-filled-block': '100', + 'Creation-time': 'Sat, Jan 15 22:18:40 2020', + }, { + 'LV-ID': '987654321', + 'Mapping': '---', + 'Used': '20000', + 'Size': '20000', + 'ID': '123123123123', + 'Progress': '---', + 'Min-reserve': '20000', + 'Last-modification-time': 'Sat, Jan 12 22:18:40 2020', + 'Valid-filled-block': '100', + 'Name': volume_id, + 'Mapped': 'false', + 'Total-filled-block': '100', + 'Creation-time': 'Sat, Jan 15 22:18:40 2020', + }, { + 'LV-ID': self.fake_lv_id[0], + 'Mapping': '---', + 'Used': '20000', + 'Size': '20000', + 'ID': '6bb119a8-d25b-45a7-8d1b-88e127885666', + 'Progress': '---', + 'Min-reserve': '20000', + 'Last-modification-time': 
'Sat, Jan 16 22:18:40 2020', + 'Valid-filled-block': '100', + 'Name': volume_id, + 'Mapped': 'false', + 'Total-filled-block': '100', + 'Creation-time': 'Sat, Jan 14 22:18:40 2020', + }]) + return (0, result) + + def get_fake_show_partition_detail(self): + msg = """ +show part -l + ID: %s + Name: %s + LV-ID: %s + Size: 20000 + Used: 20000 + Min-reserve: 20000 + Creation-time: Wed, Jan 08 20:23:23 2020 + Last-modification-time: Wed, Jan 08 20:23:23 2020 + Valid-filled-block: 100 + Total-filled-block: 100 + Progress: --- + Mapped: true + Mapping: CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0 + + ID: %s + Name: %s + LV-ID: %s + Size: 20000 + Used: 20000 + Min-reserve: 20000 + Creation-time: Sat, Jan 11 22:18:40 2020 + Last-modification-time: Sat, Jan 11 22:18:40 2020 + Valid-filled-block: 100 + Total-filled-block: 100 + Progress: --- + Mapped: false + Mapping: --- + +CLI: Successful: 3 partition(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_partition_id[0], + self.fake_volume_id[0], + self.fake_lv_id[0], + self.fake_partition_id[1], + self.fake_volume_id[1], + self.fake_lv_id[0]) + + def get_test_show_replica_detail_for_migrate( + self, src_part_id, dst_part_id, volume_id, status='Completed'): + result = [{ + 'Pair-ID': self.fake_pair_id[0], + 'Name': 'Cinder-Snapshot', + 'Source-Device': 'DEEC', + 'Source': src_part_id, + 'Source-Type': 'LV-Partition', + 'Source-Name': volume_id, + 'Source-LV': '5DE94FF775D81C30', + 'Source-VS': '2C482316298F7A4E', + 'Source-Mapped': 'Yes', + 'Target-Device': 'DEEC', + 'Target': dst_part_id, + 'Target-Type': 'LV-Partition', + 'Target-Name': volume_id, + 'Target-LV': '5DE94FF775D81C30', + 'Target-VS': '033EA1FA4EA193EB', + 'Target-Mapped': 'No', + 'Type': 'Copy', + 'Priority': 'Normal', + 'Timeout': '---', + 'Incremental': '---', + 'Compression': '---', + 'Status': status, + 'Progress': '---', + 'Created-time': '01/11/2020 22:20 PM', + 'Sync-commence-time': '01/11/2020 22:20 PM', + 'Split-time': '01/11/2020 22:20 PM', + 'Completed-time': '01/11/2020 22:21 PM', + 'Description': '---', + }] + return (0, result) + + def get_test_show_replica_detail_for_si_sync_pair(self): + result = [{ + 'Pair-ID': self.fake_pair_id[0], + 'Name': 'Cinder-Snapshot', + 'Source-Device': 'DEEC', + 'Source': self.fake_snapshot_id[0], + 'Source-Type': 'LV-Partition', + 'Source-Name': '', + 'Source-LV': '5DE94FF775D81C30', + 'Source-VS': '2C482316298F7A4E', + 'Source-Mapped': 'Yes', + 'Target-Device': 'DEEC', + 'Target': self.fake_partition_id[1], + 'Target-Type': 'LV-Partition', + 'Target-Name': '', + 'Target-LV': '5DE94FF775D81C30', + 'Target-VS': '033EA1FA4EA193EB', + 'Target-Mapped': 'No', + 'Type': 'Copy', + 'Priority': 'Normal', + 'Timeout': '---', + 'Incremental': '---', + 'Compression': '---', + 'Status': 'Copy', + 'Progress': '---', + 'Created-time': '01/11/2020 22:20 PM', + 'Sync-commence-time': '01/11/2020 22:20 PM', + 'Split-time': '01/11/2020 22:20 PM', + 'Completed-time': '01/11/2020 22:21 PM', + 'Description': '---', + }] + return (0, result) + + def get_test_show_replica_detail_for_sync_pair(self): + result = [{ + 'Pair-ID': self.fake_pair_id[0], + 'Name': 'Cinder-Snapshot', + 'Source-Device': 'DEEC', + 'Source': self.fake_partition_id[0], + 'Source-Type': 'LV-Partition', + 'Source-Name': self.fake_volume_id[0], + 'Source-LV': '5DE94FF775D81C30', + 'Source-VS': '2C482316298F7A4E', + 'Source-Mapped': 'Yes', + 'Target-Device': 'DEEC', + 'Target': self.fake_partition_id[1], + 'Target-Type': 'LV-Partition', + 'Target-Name': self.fake_volume_id[1], 
+ 'Target-LV': '5DE94FF775D81C30', + 'Target-VS': '033EA1FA4EA193EB', + 'Target-Mapped': 'No', + 'Type': 'Copy', + 'Priority': 'Normal', + 'Timeout': '---', + 'Incremental': '---', + 'Compression': '---', + 'Status': 'Copy', + 'Progress': '---', + 'Created-time': '01/11/2020 22:20 PM', + 'Sync-commence-time': '01/11/2020 22:20 PM', + 'Split-time': '01/11/2020 22:20 PM', + 'Completed-time': '01/11/2020 22:21 PM', + 'Description': '---', + }] + return (0, result) + + def get_test_show_replica_detail(self): + result = [{ + 'Pair-ID': '4BF246E26966F015', + 'Name': 'Cinder-Snapshot', + 'Source-Device': 'DEEC', + 'Source': self.fake_partition_id[2], + 'Source-Type': 'LV-Partition', + 'Source-Name': 'Part-2', + 'Source-LV': '5DE94FF775D81C30', + 'Source-VS': '2C482316298F7A4E', + 'Source-Mapped': 'No', + 'Target-Device': 'DEEC', + 'Target': self.fake_partition_id[3], + 'Target-Type': 'LV-Partition', + 'Target-Name': 'Part-1-Copy', + 'Target-LV': '5DE94FF775D81C30', + 'Target-VS': '714B80F0335F6E52', + 'Target-Mapped': 'No', + 'Type': 'Copy', + 'Priority': 'Normal', + 'Timeout': '---', + 'Incremental': '---', + 'Compression': '---', + 'Status': 'Completed', + 'Progress': '---', + 'Created-time': '01/11/2020 22:20 PM', + 'Sync-commence-time': '01/11/2020 22:20 PM', + 'Split-time': '01/11/2020 22:20 PM', + 'Completed-time': '01/11/2020 22:21 PM', + 'Description': '---', + }, { + 'Pair-ID': self.fake_pair_id[0], + 'Name': 'Cinder-Migrate', + 'Source-Device': 'DEEC', + 'Source': self.fake_partition_id[0], + 'Source-Type': 'LV-Partition', + 'Source-Name': self.fake_volume_id[0], + 'Source-LV': '5DE94FF775D81C30', + 'Source-VS': '2C482316298F7A4E', + 'Source-Mapped': 'Yes', + 'Target-Device': 'DEEC', + 'Target': self.fake_partition_id[1], + 'Target-Type': 'LV-Partition', + 'Target-Name': self.fake_volume_id[1], + 'Target-LV': '5DE94FF775D81C30', + 'Target-VS': '033EA1FA4EA193EB', + 'Target-Mapped': 'No', + 'Type': 'Mirror', + 'Priority': 'Normal', + 'Timeout': '---', + 'Incremental': '---', + 'Compression': '---', + 'Status': 'Mirror', + 'Progress': '---', + 'Created-time': '01/11/2020 22:20 PM', + 'Sync-commence-time': '01/11/2020 22:20 PM', + 'Split-time': '01/11/2020 22:20 PM', + 'Completed-time': '01/11/2020 22:21 PM', + 'Description': '---', + }, { + 'Pair-ID': self.fake_pair_id[1], + 'Name': 'Cinder-Migrate', + 'Source-Device': 'DEEC', + 'Source': self.fake_partition_id[4], + 'Source-Type': 'LV-Partition', + 'Source-Name': self.fake_volume_id[0], + 'Source-LV': '5DE94FF775D81C30', + 'Source-VS': '2C482316298F7A4E', + 'Source-Mapped': 'No', + 'Target-Device': 'DEEC', + 'Target': self.fake_partition_id[5], + 'Target-Type': 'LV-Partition', + 'Target-Name': self.fake_volume_id[1], + 'Target-LV': '5DE94FF775D81C30', + 'Target-VS': '714B80F0335F6E52', + 'Target-Mapped': 'Yes', + 'Type': 'Mirror', + 'Priority': 'Normal', + 'Timeout': '---', + 'Incremental': '---', + 'Compression': '---', + 'Status': 'Mirror', + 'Progress': '---', + 'Created-time': '01/11/2020 22:20 PM', + 'Sync-commence-time': '01/11/2020 22:20 PM', + 'Split-time': '01/11/2020 22:20 PM', + 'Completed-time': '01/11/2020 22:21 PM', + 'Description': '---', + }] + return (0, result) + + def get_fake_show_replica_detail(self): + msg = """ +show replica -l + Pair-ID: 4BF246E26966F015 + Name: Cinder-Snapshot + Source-Device: DEEC + Source: %s + Source-Type: LV-Partition + Source-Name: Part-2 + Source-LV: 5DE94FF775D81C30 + Source-VS: 2C482316298F7A4E + Source-Mapped: No + Target-Device: DEEC + Target: %s + Target-Type: LV-Partition + 
Target-Name: Part-1-Copy + Target-LV: 5DE94FF775D81C30 + Target-VS: 714B80F0335F6E52 + Target-Mapped: No + Type: Copy + Priority: Normal + Timeout: --- + Incremental: --- + Compression: --- + Status: Completed + Progress: --- + Created-time: 01/11/2020 22:20 PM + Sync-commence-time: 01/11/2020 22:20 PM + Split-time: 01/11/2020 22:20 PM + Completed-time: 01/11/2020 22:21 PM + Description: --- + + Pair-ID: %s + Name: Cinder-Migrate + Source-Device: DEEC + Source: %s + Source-Type: LV-Partition + Source-Name: %s + Source-LV: 5DE94FF775D81C30 + Source-VS: 2C482316298F7A4E + Source-Mapped: Yes + Target-Device: DEEC + Target: %s + Target-Type: LV-Partition + Target-Name: %s + Target-LV: 5DE94FF775D81C30 + Target-VS: 033EA1FA4EA193EB + Target-Mapped: No + Type: Mirror + Priority: Normal + Timeout: --- + Incremental: --- + Compression: --- + Status: Mirror + Progress: --- + Created-time: 01/11/2020 22:20 PM + Sync-commence-time: 01/11/2020 22:20 PM + Split-time: 01/11/2020 22:20 PM + Completed-time: 01/11/2020 22:21 PM + Description: --- + + Pair-ID: %s + Name: Cinder-Migrate + Source-Device: DEEC + Source: %s + Source-Type: LV-Partition + Source-Name: %s + Source-LV: 5DE94FF775D81C30 + Source-VS: 2C482316298F7A4E + Source-Mapped: No + Target-Device: DEEC + Target: %s + Target-Type: LV-Partition + Target-Name: %s + Target-LV: 5DE94FF775D81C30 + Target-VS: 714B80F0335F6E52 + Target-Mapped: Yes + Type: Mirror + Priority: Normal + Timeout: --- + Incremental: --- + Compression: --- + Status: Mirror + Progress: --- + Created-time: 01/11/2020 22:20 PM + Sync-commence-time: 01/11/2020 22:20 PM + Split-time: 01/11/2020 22:20 PM + Completed-time: 01/11/2020 22:21 PM + Description: --- + +CLI: Successful: 3 replication job(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_partition_id[2], + self.fake_partition_id[3], + self.fake_pair_id[0], + self.fake_partition_id[0], + self.fake_volume_id[0], + self.fake_partition_id[1], + self.fake_volume_id[1], + self.fake_pair_id[1], + self.fake_partition_id[4], + self.fake_volume_id[0], + self.fake_partition_id[5], + self.fake_volume_id[1]) + + def get_test_show_lv(self): + return (0, [{ + 'Name': 'LV-1', + 'LD-amount': '1', + 'Available': '841978 MB', + 'ID': self.fake_lv_id[0], + 'Progress': '---', + 'Size': '857982 MB', + 'Status': 'On-line', + }]) + + def get_fake_show_lv(self): + msg = """ +show lv + ID Name LD-amount Size Available Progress Status +-------------------------------------------------------------- + %s LV-1 1 857982 MB 841978 MB --- On-line + +CLI: Successful: 1 Logical Volumes(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % self.fake_lv_id[0] + + def get_test_show_lv_detail(self): + return (0, [{ + 'Policy': 'Default', + 'Status': 'On-line', + 'ID': self.fake_lv_id[0], + 'Available': '841978 MB', + 'Expandable-size': '0 MB', + 'Name': 'LV-1', + 'Size': '857982 MB', + 'LD-amount': '1', + 'Progress': '---', + }]) + + def get_fake_show_lv_detail(self): + msg = """ +show lv -l + ID: %s + Name: LV-1 + LD-amount: 1 + Size: 857982 MB + Available: 841978 MB + Expandable-size: 0 MB + Policy: Default + Progress: --- + Status: On-line + +CLI: Successful: 1 Logical Volumes(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % self.fake_lv_id[0] + + def get_test_show_lv_tier_for_migration(self): + return (0, [{ + 'LV-Name': 'LV-1', + 'LV-ID': self.fake_lv_id[1], + 'Tier': '0', + 'Size': '418.93 GB', + 'Used': '10 GB(2.4%)', + 'Data Service': '0 MB(0.0%)', + 'Reserved Ratio': '10.0%', + }, { + 'LV-Name': 'LV-1', + 'LV-ID': 
self.fake_lv_id[1], + 'Tier': '3', + 'Size': '931.02 GB', + 'Used': '0 MB(0.0%)', + 'Data Service': '0 MB(0.0%)', + 'Reserved Ratio': '0.0%', + }]) + + def get_test_show_lv_tier(self): + return (0, [{ + 'LV-Name': 'LV-1', + 'LV-ID': self.fake_lv_id[0], + 'Tier': '0', + 'Size': '418.93 GB', + 'Used': '10 GB(2.4%)', + 'Data Service': '0 MB(0.0%)', + 'Reserved Ratio': '10.0%', + }, { + 'LV-Name': 'LV-1', + 'LV-ID': self.fake_lv_id[0], + 'Tier': '3', + 'Size': '931.02 GB', + 'Used': '0 MB(0.0%)', + 'Data Service': '0 MB(0.0%)', + 'Reserved Ratio': '0.0%', + }]) + + def get_fake_show_lv_tier(self): + msg = """ +show lv tier + LV-Name LV-ID Tier Size Used Data Service Reserved Ratio +------------------------------------------------------------------------------ + LV-1 %s 0 418.93 GB 10 GB(2.4%%) 0 MB(0.0%%) 10.0%% + LV-1 %s 3 931.02 GB 0 MB(0.0%%) 0 MB(0.0%%) 0.0%% + +CLI: Successful: 2 lv tiering(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_lv_id[0], + self.fake_lv_id[0]) + + def get_test_show_device(self): + return (0, [{ + 'ID': self.fake_system_id[0], + 'Connected-IP': self.fake_manage_port_ip[0], + 'Name': '---', + 'Index': '0*', + 'JBOD-ID': 'N/A', + 'Capacity': '1.22 TB', + 'Model': self.fake_model[0], + 'Service-ID': '8445676', + }]) + + def get_fake_show_device(self): + msg = """ +show device + Index ID Model Name Connected-IP JBOD-ID Capacity Service-ID +------------------------------------------------------------------------ + 0* %s %s --- %s N/A 1.22 TB 8445676 + +CLI: Successful: 1 device(s) found +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_system_id[0], + self.fake_model[0], + self.fake_manage_port_ip[0]) + + def get_test_show_channel_single(self): + return (0, [{ + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '0', + 'MCS': 'N/A', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '1', + 'MCS': '0', + 'curClock': '---', + }]) + + def get_test_show_channel_with_mcs(self): + return (0, [{ + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '0', + 'MCS': 'N/A', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '1', + 'MCS': '1', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '2', + 'MCS': '1', + 'curClock': '---', + }, { + 'ID': '---', + 'defClock': '6.0 Gbps', + 'Type': 'SAS', + 'Mode': 'Drive', + 'Width': 'SAS', + 'Ch': '3', + 'MCS': 'N/A', + 'curClock': '6.0 Gbps', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '4', + 'MCS': '2', + 'curClock': '---', + }, { + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '5', + 'MCS': 'N/A', + 'curClock': '---', + }]) + + def get_test_show_channel_without_mcs(self): + return (0, [{ + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '0', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '1', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '2', + 'curClock': '---', + }, { + 'ID': '---', + 'defClock': '6.0 Gbps', + 'Type': 'SAS', + 'Mode': 'Drive', + 
'Width': 'SAS', + 'Ch': '3', + 'curClock': '6.0 Gbps', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '4', + 'curClock': '---', + }, { + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '5', + 'curClock': '---', + }]) + + def get_test_show_channel_with_diff_target_id(self): + return (0, [{ + 'ID': '32', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '0', + 'MCS': 'N/A', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '1', + 'MCS': '0', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '2', + 'MCS': '1', + 'curClock': '---', + }, { + 'ID': '---', + 'defClock': '6.0 Gbps', + 'Type': 'SAS', + 'Mode': 'Drive', + 'Width': 'SAS', + 'Ch': '3', + 'MCS': 'N/A', + 'curClock': '6.0 Gbps', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '4', + 'MCS': '2', + 'curClock': '---', + }, { + 'ID': '48', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '5', + 'MCS': 'N/A', + 'curClock': '---', + }]) + + def get_test_show_channel(self): + return (0, [{ + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '0', + 'MCS': 'N/A', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '1', + 'MCS': '0', + 'curClock': '---', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '2', + 'MCS': '1', + 'curClock': '---', + }, { + 'ID': '---', + 'defClock': '6.0 Gbps', + 'Type': 'SAS', + 'Mode': 'Drive', + 'Width': 'SAS', + 'Ch': '3', + 'MCS': 'N/A', + 'curClock': '6.0 Gbps', + }, { + 'ID': '0', + 'defClock': 'Auto', + 'Type': 'NETWORK', + 'Mode': 'Host', + 'Width': 'iSCSI', + 'Ch': '4', + 'MCS': '2', + 'curClock': '---', + }, { + 'ID': '112', + 'defClock': 'Auto', + 'Type': 'FIBRE', + 'Mode': 'Host', + 'Width': '---', + 'Ch': '5', + 'MCS': 'N/A', + 'curClock': '---', + }]) + + def get_fake_show_channel(self): + msg = """ +show ch + Ch Mode Type defClock curClock Width ID MCS +--------------------------------------------------------- + 0 Host FIBRE Auto --- --- 112 N/A + 1 Host NETWORK Auto --- iSCSI 0 0 + 2 Host NETWORK Auto --- iSCSI 0 1 + 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- N/A + 4 Host NETWORK Auto --- iSCSI 0 2 + 5 Host FIBRE Auto --- --- 112 N/A + +CLI: Successful: : 6 channel(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg + + def get_test_show_channel_r_model_diff_target_id(self): + return (0, [{ + 'Mode': 'Host', + 'AID': '32', + 'defClock': 'Auto', + 'MCS': 'N/A', + 'Ch': '0', + 'BID': '33', + 'curClock': '---', + 'Width': '---', + 'Type': 'FIBRE', + }, { + 'Mode': 'Host', + 'AID': '0', + 'defClock': 'Auto', + 'MCS': '0', + 'Ch': '1', + 'BID': '1', + 'curClock': '---', + 'Width': 'iSCSI', + 'Type': 'NETWORK', + }, { + 'Mode': 'Host', + 'AID': '0', + 'defClock': 'Auto', + 'MCS': '1', + 'Ch': '2', + 'BID': '1', + 'curClock': '---', + 'Width': 'iSCSI', + 'Type': 'NETWORK', + }, { + 'Mode': 'Drive', + 'AID': '---', + 'defClock': '6.0 Gbps', + 'MCS': 'N/A', + 'Ch': '3', + 'BID': '---', + 'curClock': '6.0 Gbps', + 'Width': 'SAS', + 'Type': 'SAS', + }, { + 'Mode': 'Host', + 'AID': '0', + 'defClock': 'Auto', + 'MCS': '2', + 'Ch': '4', + 
'BID': '1', + 'curClock': '---', + 'Width': 'iSCSI', + 'Type': 'NETWORK', + }, { + 'Mode': 'Host', + 'AID': '48', + 'defClock': 'Auto', + 'MCS': 'N/A', + 'Ch': '5', + 'BID': '49', + 'curClock': '---', + 'Width': '---', + 'Type': 'FIBRE', + }]) + + def get_test_show_channel_r_model(self): + return (0, [{ + 'Mode': 'Host', + 'AID': '112', + 'defClock': 'Auto', + 'MCS': 'N/A', + 'Ch': '0', + 'BID': '113', + 'curClock': '---', + 'Width': '---', + 'Type': 'FIBRE', + }, { + 'Mode': 'Host', + 'AID': '0', + 'defClock': 'Auto', + 'MCS': '0', + 'Ch': '1', + 'BID': '1', + 'curClock': '---', + 'Width': 'iSCSI', + 'Type': 'NETWORK', + }, { + 'Mode': 'Host', + 'AID': '0', + 'defClock': 'Auto', + 'MCS': '1', + 'Ch': '2', + 'BID': '1', + 'curClock': '---', + 'Width': 'iSCSI', + 'Type': 'NETWORK', + }, { + 'Mode': 'Drive', + 'AID': '---', + 'defClock': '6.0 Gbps', + 'MCS': 'N/A', + 'Ch': '3', + 'BID': '---', + 'curClock': '6.0 Gbps', + 'Width': 'SAS', + 'Type': 'SAS', + }, { + 'Mode': 'Host', + 'AID': '0', + 'defClock': 'Auto', + 'MCS': '2', + 'Ch': '4', + 'BID': '1', + 'curClock': '---', + 'Width': 'iSCSI', + 'Type': 'NETWORK', + }, { + 'Mode': 'Host', + 'AID': '112', + 'defClock': 'Auto', + 'MCS': 'N/A', + 'Ch': '5', + 'BID': '113', + 'curClock': '---', + 'Width': '---', + 'Type': 'FIBRE', + }]) + + def get_fake_show_channel_r_model(self): + msg = """ +show ch + Ch Mode Type defClock curClock Width AID BID MCS +---------------------------------------------------------------- + 0 Host FIBRE Auto --- --- 112 113 N/A + 1 Host NETWORK Auto --- iSCSI 0 1 0 + 2 Host NETWORK Auto --- iSCSI 0 1 1 + 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- --- N/A + 4 Host NETWORK Auto --- iSCSI 0 1 2 + 5 Host FIBRE Auto --- --- 112 113 N/A + +CLI: Successful: : 9 channel(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg + + def get_show_map_with_lun_map_on_zoning(self): + return (0, [{ + 'Ch': '0', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_wwpns[0], + 'Target': '112', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }]) + + def get_test_show_map(self, partition_id=None, channel_id=None): + if partition_id and channel_id: + return (0, [{ + 'Ch': channel_id, + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': '---', + 'Target': '0', + 'Name': 'Part-1', + 'ID': partition_id, + }, { + 'Ch': channel_id, + 'LUN': '1', + 'Media': 'PART', + 'Host-ID': '---', + 'Target': '0', + 'Name': 'Part-1', + 'ID': partition_id, + }]) + else: + return (0, [{ + 'Ch': '1', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_iqn[0], + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '1', + 'LUN': '1', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_iqn[0], + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '4', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_iqn[0], + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }]) + + def get_test_show_map_fc(self): + return (0, [{ + 'Ch': '0', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_wwpns[0], + 'Target': '112', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '0', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_wwpns[1], + 'Target': '112', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '5', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_wwpns[0], + 'Target': '112', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '5', + 'LUN': 
'0', + 'Media': 'PART', + 'Host-ID': self.fake_initiator_wwpns[1], + 'Target': '112', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }]) + + def get_test_show_map_multimap(self): + return (0, [{ + 'Ch': '1', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': '---', + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '1', + 'LUN': '1', + 'Media': 'PART', + 'Host-ID': '---', + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '4', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': '210000E08B0AADE1', + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }, { + 'Ch': '4', + 'LUN': '0', + 'Media': 'PART', + 'Host-ID': '210000E08B0AADE2', + 'Target': '0', + 'Name': 'Part-1', + 'ID': self.fake_partition_id[0], + }]) + + def get_fake_show_map(self): + msg = """ +show map + Ch Target LUN Media Name ID Host-ID +----------------------------------------------------------- + 1 0 0 PART Part-1 %s %s + 1 0 1 PART Part-1 %s %s + 4 0 0 PART Part-1 %s %s + +CLI: Successful: 3 mapping(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_partition_id[0], + self.fake_initiator_iqn[0], + self.fake_partition_id[0], + self.fake_initiator_iqn[0], + self.fake_partition_id[0], + self.fake_initiator_iqn[0]) + + def get_test_show_license_full(self): + return (0, { + 'Local Volume Copy': { + 'Support': False, + 'Amount': '8/256', + }, + 'Synchronous Remote Mirror': { + 'Support': False, + 'Amount': '8/256', + }, + 'Snapshot': { + 'Support': False, + 'Amount': '1024/16384', + }, + 'Self-Encryption Drives': { + 'Support': False, + 'Amount': '---', + }, + 'Compression': { + 'Support': False, + 'Amount': '---', + }, + 'Local volume Mirror': { + 'Support': False, + 'Amount': '8/256', + }, + 'Storage Tiering': { + 'Support': False, + 'Amount': '---', + }, + 'Asynchronous Remote Mirror': { + 'Support': False, + 'Amount': '8/256', + }, + 'Scale-out': { + 'Support': False, + 'Amount': 'Not Support', + }, + 'Thin Provisioning': { + 'Support': False, + 'Amount': '---', + }, + 'Max JBOD': { + 'Support': False, + 'Amount': '15', + }, + 'EonPath': { + 'Support': False, + 'Amount': '---', + } + }) + + def get_test_show_license_thin(self): + return (0, { + 'Local Volume Copy': { + 'Support': False, + 'Amount': '8/256', + }, + 'Synchronous Remote Mirror': { + 'Support': False, + 'Amount': '8/256', + }, + 'Snapshot': { + 'Support': False, + 'Amount': '1024/16384', + }, + 'Self-Encryption Drives': { + 'Support': False, + 'Amount': '---', + }, + 'Compression': { + 'Support': False, + 'Amount': '---', + }, + 'Local volume Mirror': { + 'Support': False, + 'Amount': '8/256', + }, + 'Storage Tiering': { + 'Support': False, + 'Amount': '---', + }, + 'Asynchronous Remote Mirror': { + 'Support': False, + 'Amount': '8/256', + }, + 'Scale-out': { + 'Support': False, + 'Amount': 'Not Support', + }, + 'Thin Provisioning': { + 'Support': True, + 'Amount': '---', + }, + 'Max JBOD': { + 'Support': False, + 'Amount': '15', + }, + 'EonPath': { + 'Support': False, + 'Amount': '---', + } + }) + + def get_fake_show_license(self): + msg = """ +show license + License Amount(Partition/Subsystem) Expired +------------------------------------------------------------------ + EonPath --- Expired + Scale-out Not Support --- + Snapshot 1024/16384 Expired + Local Volume Copy 8/256 Expired + Local volume Mirror 8/256 Expired + Synchronous Remote Mirror 8/256 Expired + Asynchronous Remote Mirror 8/256 Expired + Compression --- Expired + Thin Provisioning --- 
Expired + Storage Tiering --- Expired + Max JBOD 15 Expired + Self-Encryption Drives --- Expired + +CLI: Successful +Return: 0x0000 + +RAIDCmd:> +""" + return msg + + def get_test_show_wwn_with_g_model(self): + return (0, [{ + 'ID': 'ID:112', + 'WWPN': self.fake_target_wwpns[0], + 'CH': '0', + 'WWNN': self.fake_target_wwnns[0], + }, { + 'ID': 'ID:112', + 'WWPN': self.fake_target_wwpns[1], + 'CH': '5', + 'WWNN': self.fake_target_wwnns[0], + }]) + + def get_test_show_wwn_with_diff_target_id(self): + return (0, [{ + 'ID': 'AID:32', + 'WWPN': self.fake_target_wwpns[0], + 'CH': '0', + 'WWNN': self.fake_target_wwnns[0], + }, { + 'ID': 'BID:33', + 'WWPN': self.fake_target_wwpns[2], + 'CH': '0', + 'WWNN': self.fake_target_wwnns[1], + }, { + 'ID': 'AID:48', + 'WWPN': self.fake_target_wwpns[1], + 'CH': '5', + 'WWNN': self.fake_target_wwnns[0], + }, { + 'ID': 'BID:49', + 'WWPN': self.fake_target_wwpns[3], + 'CH': '5', + 'WWNN': self.fake_target_wwnns[1], + }]) + + def get_test_show_wwn(self): + return (0, [{ + 'ID': 'AID:112', + 'WWPN': self.fake_target_wwpns[0], + 'CH': '0', + 'WWNN': self.fake_target_wwnns[0], + }, { + 'ID': 'BID:113', + 'WWPN': self.fake_target_wwpns[2], + 'CH': '0', + 'WWNN': self.fake_target_wwnns[1], + }, { + 'ID': 'AID:112', + 'WWPN': self.fake_target_wwpns[1], + 'CH': '5', + 'WWNN': self.fake_target_wwnns[0], + }, { + 'ID': 'BID:113', + 'WWPN': self.fake_target_wwpns[3], + 'CH': '5', + 'WWNN': self.fake_target_wwnns[1], + }]) + + def get_fake_show_wwn(self): + msg = """ +show wwn +WWN entries in controller for host channels: + CH ID WWNN WWPN +------------------------------------------------- + 0 AID:112 %s %s + 0 BID:113 %s %s + 5 AID:112 %s %s + 5 BID:113 %s %s + +CLI: Successful +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_target_wwnns[0], self.fake_target_wwpns[0], + self.fake_target_wwnns[1], self.fake_target_wwpns[2], + self.fake_target_wwnns[0], self.fake_target_wwpns[1], + self.fake_target_wwnns[1], self.fake_target_wwpns[3]) + + def get_test_show_iqn(self): + return (0, [{ + 'Name': self.fake_initiator_iqn[0][-16:], + 'IQN': self.fake_initiator_iqn[0], + 'User': '---', + 'Password': '---', + 'Target': '---', + 'Target-Password': '---', + 'IP': '0.0.0.0', + 'Mask': '0.0.0.0', + }]) + + def get_fake_show_iqn(self): + msg = """ +show iqn +Detected host IQN: + IQN +---------------------------------------- + %s + + +List of initiator IQN(s): +-------------------------- + Name: %s + IQN: %s + User: --- + Password: --- + Target: --- + Target-Password: --- + IP: 0.0.0.0 + Mask: 0.0.0.0 + +CLI: Successful: 1 initiator iqn(s) shown +Return: 0x0000 + +RAIDCmd:> +""" + return msg % (self.fake_initiator_iqn[0], + self.fake_initiator_iqn[0][-16:], + self.fake_initiator_iqn[0]) + + def get_test_show_host(self): + return (0, [{ + 'Fibre connection option': 'Point to point only', + 'Max queued count': '1024', + 'Max LUN per ID': '64', + 'CHAP': 'Disabled', + 'Jumbo frame': 'Disabled', + 'Max concurrent LUN connection': '4', + 'LUN connection reserved tags': '4', + 'Peripheral device type': 'No Device Present (Type=0x7f)', + 'Peripheral device qualifier': 'Connected', + 'Removable media support': 'Disabled', + 'LUN applicability': 'First Undefined LUN', + 'Supported CHS Cylinder': 'Variable', + 'Supported CHS Head': 'Variable', + 'Supported CHS Sector': 'Variable', + }]) + + def get_fake_show_host(self): + msg = """ +show host + Fibre connection option: Point to point only + Max queued count: 1024 + Max LUN per ID: 64 + CHAP: Disabled + Jumbo frame: Disabled + Max 
concurrent LUN connection: 4 + LUN connection reserved tags: 4 + Peripheral device type: No Device Present (Type=0x7f) + Peripheral device qualifier: Connected + Removable media support: Disabled + LUN applicability: First Undefined LUN + Supported CHS Cylinder: Variable + Supported CHS Head: Variable + Supported CHS Sector: Variable + +CLI: Successful +Return: 0x0000 + +RAIDCmd:> +""" + return msg + + def get_fake_discovery(self, target_iqns, target_portals): + template = '%s,1 %s' + + if len(target_iqns) == 1: + result = template % (target_portals[0], target_iqns[0]) + return (0, result) + + result = [] + for i in range(len(target_iqns)): + result.append(template % ( + target_portals[i], target_iqns[i])) + return (0, '\n'.join(result)) + + class Fake_cinder_object(object): + id = None + + def __init__(self, test_volume): + self.id = test_volume + + class Fake_cinder_snapshot(Fake_cinder_object): + provider_location = None + + def __init__(self, id, provider_location): + self.id = id + self.provider_location = provider_location + + fake_cinder_volumes = [Fake_cinder_object(test_dst_volume['id'])] + fake_cinder_snapshots = [Fake_cinder_object(fake_snapshot_name[1])] + + +class InfortrendCLITestCase(test.TestCase): + + CommandList = ['CreateLD', 'CreateLV', + 'CreatePartition', 'DeletePartition', + 'CreateMap', 'DeleteMap', + 'CreateSnapshot', 'DeleteSnapshot', + 'CreateReplica', 'DeleteReplica', + 'CreateIQN', 'DeleteIQN', + 'ShowLD', 'ShowLV', + 'ShowPartition', 'ShowSnapshot', + 'ShowDevice', 'ShowChannel', + 'ShowDisk', 'ShowMap', + 'ShowNet', 'ShowLicense', + 'ShowWWN', 'ShowReplica', + 'ShowIQN', 'ShowHost', 'ConnectRaid', + 'SetPartition', 'SetLV'] + + def __init__(self, *args, **kwargs): + super(InfortrendCLITestCase, self).__init__(*args, **kwargs) + self.cli_data = InfortrendCLITestData() + + def _cli_set(self, cli, fake_result): + cli_conf = { + 'path': '', + 'password': '', + 'ip': '', + 'cli_retry_time': 1, + 'raidcmd_timeout': 60, + 'cli_cache': False, + 'pid': 12345, + 'fd': 10, + } + cli = cli(cli_conf) + + cli._execute = mock.Mock(return_value=fake_result) + + return cli + + def _cli_multi_set(self, cli, fake_result_list): + cli_conf = { + 'path': '', + 'password': '', + 'ip': '', + 'cli_retry_time': 5, + 'raidcmd_timeout': 60, + 'cli_cache': False, + 'pid': 12345, + 'fd': 10, + } + cli = cli(cli_conf) + + cli._execute = mock.Mock(side_effect=fake_result_list) + + return cli + + def _test_command_succeed(self, command): + + fake_cli_succeed = self.cli_data.get_fake_cli_succeed() + test_command = self._cli_set(command, fake_cli_succeed) + + rc, out = test_command.execute() + self.assertEqual(0, rc) + + def _test_command_failed(self, command): + + fake_cli_failed = self.cli_data.get_fake_cli_failed() + test_command = self._cli_set(command, fake_cli_failed) + + rc, out = test_command.execute() + self.assertEqual(int('0x000c', 16), rc) + + def _test_command_failed_retry_succeed(self, log_error, command): + + log_error.reset_mock() + + LOG_ERROR_STR = ( + 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' + ) + + fake_result_list = [ + self.cli_data.get_fake_cli_failed(), + self.cli_data.get_fake_cli_failed_with_network(), + self.cli_data.get_fake_cli_succeed(), + ] + test_command = self._cli_multi_set(command, fake_result_list) + + rc, out = test_command.execute() + self.assertEqual(11, rc) + + expect_log_error = [ + mock.call(LOG_ERROR_STR, { + 'retry': 1, + 'method': test_command.__class__.__name__, + 'rc': int('0x000c', 16), + 'reason': 'No selected device', + }), 
+ mock.call(LOG_ERROR_STR, { + 'retry': 2, + 'method': test_command.__class__.__name__, + 'rc': int('0x000b', 16), + 'reason': 'Not exist: There is no such partition: 3345678', + }) + ] + log_error.assert_has_calls(expect_log_error) + + def _test_command_failed_retry_timeout(self, log_error, command): + + log_error.reset_mock() + + LOG_ERROR_STR = ( + 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' + ) + + fake_result_list = [ + self.cli_data.get_fake_cli_failed(), + self.cli_data.get_fake_cli_failed(), + self.cli_data.get_fake_cli_failed(), + self.cli_data.get_fake_cli_failed(), + self.cli_data.get_fake_cli_failed(), + ] + test_command = self._cli_multi_set(command, fake_result_list) + + rc, out = test_command.execute() + self.assertEqual(int('0x000c', 16), rc) + self.assertEqual('No selected device', out) + + expect_log_error = [ + mock.call(LOG_ERROR_STR, { + 'retry': 1, + 'method': test_command.__class__.__name__, + 'rc': int('0x000c', 16), + 'reason': 'No selected device', + }), + mock.call(LOG_ERROR_STR, { + 'retry': 2, + 'method': test_command.__class__.__name__, + 'rc': int('0x000c', 16), + 'reason': 'No selected device', + }), + mock.call(LOG_ERROR_STR, { + 'retry': 3, + 'method': test_command.__class__.__name__, + 'rc': int('0x000c', 16), + 'reason': 'No selected device', + }), + mock.call(LOG_ERROR_STR, { + 'retry': 4, + 'method': test_command.__class__.__name__, + 'rc': int('0x000c', 16), + 'reason': 'No selected device', + }), + mock.call(LOG_ERROR_STR, { + 'retry': 5, + 'method': test_command.__class__.__name__, + 'rc': int('0x000c', 16), + 'reason': 'No selected device', + }) + ] + log_error.assert_has_calls(expect_log_error) + + def _test_show_command(self, fake_data, test_data, command, *params): + + test_command = self._cli_set(command, fake_data) + + rc, out = test_command.execute(*params) + + self.assertEqual(test_data[0], rc) + + if isinstance(out, list): + for i in range(len(test_data[1])): + self.assertDictEqual(test_data[1][i], out[i]) + else: + self.assertDictEqual(test_data[1], out) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_cli_all_command_execute(self): + + for command in self.CommandList: + self._test_command_succeed(getattr(cli, command)) + self._test_command_failed(getattr(cli, command)) + + @mock.patch.object(cli.LOG, 'error') + def test_cli_all_command_execute_retry_succeed(self, log_error): + + for command in self.CommandList: + self._test_command_failed_retry_succeed( + log_error, getattr(cli, command)) + + @mock.patch.object(cli.LOG, 'error') + def test_cli_all_command_execute_retry_timeout(self, log_error): + + for command in self.CommandList: + self._test_command_failed_retry_timeout( + log_error, getattr(cli, command)) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_snapshot(self): + self._test_show_command( + self.cli_data.get_fake_show_snapshot(), + self.cli_data.get_test_show_snapshot(), + cli.ShowSnapshot) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_snapshot_detail(self): + self._test_show_command( + self.cli_data.get_fake_show_snapshot_detail(), + self.cli_data.get_test_show_snapshot_detail(), + cli.ShowSnapshot, '-l') + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_net(self): + self._test_show_command( + self.cli_data.get_fake_show_net(), + self.cli_data.get_test_show_net(), + cli.ShowNet) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_detail_net(self): + self._test_show_command( + 
self.cli_data.get_fake_show_net_detail(), + self.cli_data.get_test_show_net_detail(), + cli.ShowNet, '-l') + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_partition(self): + self._test_show_command( + self.cli_data.get_fake_show_partition(), + self.cli_data.get_test_show_partition(), + cli.ShowPartition) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_partition_detail(self): + self._test_show_command( + self.cli_data.get_fake_show_partition_detail(), + self.cli_data.get_test_show_partition_detail(), + cli.ShowPartition, '-l') + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_lv(self): + self._test_show_command( + self.cli_data.get_fake_show_lv(), + self.cli_data.get_test_show_lv(), + cli.ShowLV) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_lv_detail(self): + self._test_show_command( + self.cli_data.get_fake_show_lv_detail(), + self.cli_data.get_test_show_lv_detail(), + cli.ShowLV, '-l') + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_lv_tier(self): + self._test_show_command( + self.cli_data.get_fake_show_lv_tier(), + self.cli_data.get_test_show_lv_tier(), + cli.ShowLV, 'tier') + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_device(self): + self._test_show_command( + self.cli_data.get_fake_show_device(), + self.cli_data.get_test_show_device(), + cli.ShowDevice) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_channel(self): + self._test_show_command( + self.cli_data.get_fake_show_channel(), + self.cli_data.get_test_show_channel(), + cli.ShowChannel) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_channel_with_r_model(self): + self._test_show_command( + self.cli_data.get_fake_show_channel_r_model(), + self.cli_data.get_test_show_channel_r_model(), + cli.ShowChannel) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_map(self): + self._test_show_command( + self.cli_data.get_fake_show_map(), + self.cli_data.get_test_show_map(), + cli.ShowMap) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_license(self): + self._test_show_command( + self.cli_data.get_fake_show_license(), + self.cli_data.get_test_show_license_full(), + cli.ShowLicense) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_replica_detail(self): + self._test_show_command( + self.cli_data.get_fake_show_replica_detail(), + self.cli_data.get_test_show_replica_detail(), + cli.ShowReplica, '-l') + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_wwn(self): + self._test_show_command( + self.cli_data.get_fake_show_wwn(), + self.cli_data.get_test_show_wwn(), + cli.ShowWWN) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_iqn(self): + self._test_show_command( + self.cli_data.get_fake_show_iqn(), + self.cli_data.get_test_show_iqn(), + cli.ShowIQN) + + @mock.patch.object(cli.LOG, 'debug', mock.Mock()) + def test_show_host(self): + self._test_show_command( + self.cli_data.get_fake_show_host(), + self.cli_data.get_test_show_host(), + cli.ShowHost) diff --git a/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py new file mode 100644 index 00000000000..9d4714fdca9 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py @@ -0,0 +1,2927 @@ +# Copyright (c) 2015 Infortrend Technology, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock + +from cinder import exception +from cinder import test +from cinder.tests.unit import utils +from cinder.tests.unit.volume.drivers.infortrend import test_infortrend_cli +from cinder.volume import configuration +from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli +from cinder.volume import utils as cv_utils + +SUCCEED = (0, '') +FAKE_ERROR_RETURN = (-1, '') + + +class InfortrendTestCase(test.TestCase): + + def __init__(self, *args, **kwargs): + super(InfortrendTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + super(InfortrendTestCase, self).setUp() + self.cli_data = test_infortrend_cli.InfortrendCLITestData() + + self.configuration = configuration.Configuration(None) + self.configuration.append_config_values = mock.Mock(return_value=0) + self.configuration.safe_get = self._fake_safe_get + + def _fake_safe_get(self, key): + return getattr(self.configuration, key) + + def _driver_setup(self, mock_commands, configuration=None): + if configuration is None: + configuration = self.configuration + self.driver = self._get_driver(configuration) + + mock_commands_execute = self._mock_command_execute(mock_commands) + mock_cli = mock.Mock(side_effect=mock_commands_execute) + + self.driver._execute_command = mock_cli + + def _get_driver(self, conf): + raise NotImplementedError + + def _mock_command_execute(self, mock_commands): + def fake_execute_command(cli_type, *args, **kwargs): + if cli_type in mock_commands.keys(): + if isinstance(mock_commands[cli_type], list): + ret = mock_commands[cli_type][0] + del mock_commands[cli_type][0] + return ret + elif isinstance(mock_commands[cli_type], tuple): + return mock_commands[cli_type] + else: + return mock_commands[cli_type](*args, **kwargs) + return FAKE_ERROR_RETURN + return fake_execute_command + + def _mock_show_lv_for_migrate(self, *args, **kwargs): + if 'tier' in args: + return self.cli_data.get_test_show_lv_tier_for_migration() + return self.cli_data.get_test_show_lv() + + def _mock_show_lv(self, *args, **kwargs): + if 'tier' in args: + return self.cli_data.get_test_show_lv_tier() + return self.cli_data.get_test_show_lv() + + def _assert_cli_has_calls(self, expect_cli_cmd): + self.driver._execute_command.assert_has_calls(expect_cli_cmd) + + +class InfortrendFCCommonTestCase(InfortrendTestCase): + + def __init__(self, *args, **kwargs): + super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + super(InfortrendFCCommonTestCase, self).setUp() + + self.configuration.volume_backend_name = 'infortrend_backend_1' + self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0] + self.configuration.san_password = '111111' + self.configuration.infortrend_provisioning = 'full' + self.configuration.infortrend_tiering = '0' + self.configuration.infortrend_pools_name = ['LV-1', 'LV-2'] + self.configuration.infortrend_slots_a_channels_id = [0, 5] + self.configuration.infortrend_slots_b_channels_id = [0, 5] + self.pool_dict = { 
+ 'LV-1': self.cli_data.fake_lv_id[0], + 'LV-2': self.cli_data.fake_lv_id[1], + } + + @mock.patch.object( + common_cli.InfortrendCommon, '_init_raidcmd', mock.Mock()) + @mock.patch.object( + common_cli.InfortrendCommon, '_init_raid_connection', mock.Mock()) + @mock.patch.object( + common_cli.InfortrendCommon, '_set_raidcmd', mock.Mock()) + def _get_driver(self, conf): + driver = common_cli.InfortrendCommon('FC', configuration=conf) + driver.do_setup() + driver.pool_dict = self.pool_dict + return driver + + def test_normal_channel(self): + + test_map_dict = { + 'slot_a': {'0': [], '5': []}, + 'slot_b': {}, + } + test_target_dict = { + 'slot_a': {'0': '112', '5': '112'}, + 'slot_b': {}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + } + self._driver_setup(mock_commands) + + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + def test_normal_channel_with_r_model(self): + + test_map_dict = { + 'slot_a': {'0': [], '5': []}, + 'slot_b': {'0': [], '5': []}, + } + test_target_dict = { + 'slot_a': {'0': '112', '5': '112'}, + 'slot_b': {'0': '113', '5': '113'}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), + } + self._driver_setup(mock_commands) + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual(self.cli_data.test_fc_properties, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_specific_channel(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + configuration = copy.copy(self.configuration) + configuration.infortrend_slots_a_channels_id = '5' + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands, configuration) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual( + self.cli_data.test_fc_properties_with_specific_channel, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_diff_target_id(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + test_initiator_wwpns = test_connector['wwpns'] + test_partition_id = self.cli_data.fake_partition_id[0] + configuration = copy.copy(self.configuration) + configuration.infortrend_slots_a_channels_id = '5' + + mock_commands = { + 'ShowChannel': + self.cli_data.get_test_show_channel_with_diff_target_id(), + 'ShowMap': self.cli_data.get_test_show_map(), + 
'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands, configuration) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowChannel'), + mock.call('ShowWWN'), + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('ShowMap'), + mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', + 'wwn=%s' % test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + self.assertDictEqual( + self.cli_data.test_fc_properties_with_specific_channel, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_multipath_with_r_model(self): + + test_volume = self.cli_data.test_volume + test_connector = copy.deepcopy(self.cli_data.test_connector_fc) + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual( + self.cli_data.test_fc_properties_multipath_r_model, properties) + + def test_initialize_connection_with_get_wwn_fail(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.initialize_connection, + test_volume, + test_connector) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_zoning(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + test_initiator_wwpns = test_connector['wwpns'] + test_partition_id = self.cli_data.fake_partition_id[0] + test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2] + test_lookup_map = self.cli_data.fake_lookup_map + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + self.driver.fc_lookup_service = mock.Mock() + get_device_mapping_from_network = ( + self.driver.fc_lookup_service.get_device_mapping_from_network + ) + get_device_mapping_from_network.return_value = test_lookup_map + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + get_device_mapping_from_network.assert_has_calls( + [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowChannel'), + mock.call('ShowWWN'), + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('ShowMap'), + mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0', + 'wwn=%s' % test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', + 'wwn=%s' % 
test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + self.assertDictEqual( + self.cli_data.test_fc_properties_zoning, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_zoning_r_model(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + test_initiator_wwpns = test_connector['wwpns'] + test_partition_id = self.cli_data.fake_partition_id[0] + test_all_target_wwpns = self.cli_data.fake_target_wwpns[:] + test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2] + test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1] + test_lookup_map = self.cli_data.fake_lookup_map_r_model + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + self.driver.fc_lookup_service = mock.Mock() + get_device_mapping_from_network = ( + self.driver.fc_lookup_service.get_device_mapping_from_network + ) + get_device_mapping_from_network.return_value = test_lookup_map + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + get_device_mapping_from_network.assert_has_calls( + [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowChannel'), + mock.call('ShowWWN'), + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('ShowMap'), + mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', + 'wwn=%s' % test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0', + 'wwn=%s' % test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + self.assertDictEqual( + self.cli_data.test_fc_properties_zoning_r_model, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_zoning_r_model_diff_target_id(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_fc + test_initiator_wwpns = test_connector['wwpns'] + test_partition_id = self.cli_data.fake_partition_id[0] + test_all_target_wwpns = self.cli_data.fake_target_wwpns[:] + test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2] + test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1] + test_lookup_map = self.cli_data.fake_lookup_map_r_model + + mock_commands = { + 'ShowChannel': + self.cli_data.get_test_show_channel_r_model_diff_target_id(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'CreateMap': SUCCEED, + 'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + self.driver.fc_lookup_service = mock.Mock() + get_device_mapping_from_network = ( + self.driver.fc_lookup_service.get_device_mapping_from_network + ) + get_device_mapping_from_network.return_value = test_lookup_map + + 
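As a side note on the zoning cases here: they stub the FC zone lookup service directly on the driver and later assert it was queried with the initiator and target WWPNs. A minimal standalone sketch of that stubbing pattern follows (illustrative only, not part of the patch; the WWPN values and the lookup-map structure shown are assumptions):

    import mock  # these tests use the external mock package

    # Hypothetical WWPN values, for illustration only.
    initiator_wwpns = ['2001000000000001', '2001000000000002']
    target_wwpns = ['2101000000000001', '2101000000000002']

    driver = mock.Mock()                      # stand-in for the driver object
    driver.fc_lookup_service = mock.Mock()
    lookup = driver.fc_lookup_service.get_device_mapping_from_network
    lookup.return_value = {
        'fabric_1': {'initiator_port_wwn_list': initiator_wwpns,
                     'target_port_wwn_list': target_wwpns},
    }

    # Code under test would call the lookup; the test then verifies the call.
    mapping = lookup(initiator_wwpns, target_wwpns)
    lookup.assert_called_once_with(initiator_wwpns, target_wwpns)
    assert 'fabric_1' in mapping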
properties = self.driver.initialize_connection( + test_volume, test_connector) + + get_device_mapping_from_network.assert_has_calls( + [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowChannel'), + mock.call('ShowWWN'), + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('ShowMap'), + mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', + 'wwn=%s' % test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0', + 'wwn=%s' % test_initiator_wwpns[0]), + mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0', + 'wwn=%s' % test_initiator_wwpns[1]), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + self.assertDictEqual( + self.cli_data.test_fc_properties_zoning_r_model, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_terminate_connection(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_connector = self.cli_data.test_connector_fc + + mock_commands = { + 'DeleteMap': SUCCEED, + 'ShowMap': [self.cli_data.get_test_show_map_fc(), + self.cli_data.get_test_show_empty_list()], + 'ShowWWN': SUCCEED, + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + self.driver.terminate_connection(test_volume, test_connector) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('DeleteMap', + 'part', test_partition_id, '0', '112', '0', '-y'), + mock.call('DeleteMap', + 'part', test_partition_id, '5', '112', '0', '-y'), + mock.call('ShowMap'), + mock.call('ShowWWN'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_terminate_connection_with_zoning(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_connector = self.cli_data.test_connector_fc + test_all_target_wwpns = self.cli_data.fake_target_wwpns[:2] + test_lookup_map = self.cli_data.fake_lookup_map + + mock_commands = { + 'DeleteMap': SUCCEED, + 'ShowMap': [self.cli_data.get_test_show_map_fc(), + self.cli_data.get_test_show_empty_list()], + 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + self.driver.map_dict = { + 'slot_a': {'0': [], '5': []}, + 'slot_b': {}, + } + self.driver.fc_lookup_service = mock.Mock() + get_device_mapping_from_network = ( + self.driver.fc_lookup_service.get_device_mapping_from_network + ) + get_device_mapping_from_network.return_value = test_lookup_map + + conn_info = self.driver.terminate_connection( + test_volume, test_connector) + + get_device_mapping_from_network.assert_has_calls( + [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) + + expect_cli_cmd = [ + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('DeleteMap', + 'part', test_partition_id, '0', '112', '0', '-y'), + mock.call('DeleteMap', + 'part', test_partition_id, '5', '112', '0', '-y'), + mock.call('ShowMap'), + mock.call('ShowWWN'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + self.assertDictEqual( + self.cli_data.test_fc_terminate_conn_info, conn_info) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def 
test_terminate_connection_with_zoning_and_lun_map_exist(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_connector = self.cli_data.test_connector_fc + + mock_commands = { + 'DeleteMap': SUCCEED, + 'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + self.driver.map_dict = { + 'slot_a': {'0': [], '5': []}, + 'slot_b': {}, + } + self.driver.target_dict = { + 'slot_a': {'0': '112', '5': '112'}, + 'slot_b': {}, + } + self.driver.fc_lookup_service = mock.Mock() + + conn_info = self.driver.terminate_connection( + test_volume, test_connector) + + expect_cli_cmd = [ + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('DeleteMap', + 'part', test_partition_id, '0', '112', '0', '-y'), + mock.call('ShowMap'), + ] + expect_conn_info = {'driver_volume_type': 'fibre_channel', + 'data': {}} + self._assert_cli_has_calls(expect_cli_cmd) + + self.assertEqual(expect_conn_info, conn_info) + + +class InfortrendiSCSICommonTestCase(InfortrendTestCase): + + def __init__(self, *args, **kwargs): + super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + super(InfortrendiSCSICommonTestCase, self).setUp() + + self.configuration.volume_backend_name = 'infortrend_backend_1' + self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0] + self.configuration.san_password = '111111' + self.configuration.infortrend_provisioning = 'full' + self.configuration.infortrend_tiering = '0' + self.configuration.infortrend_pools_name = ['LV-1', 'LV-2'] + self.configuration.infortrend_slots_a_channels_id = [1, 2, 4] + self.configuration.infortrend_slots_b_channels_id = [1, 2, 4] + self.pool_dict = { + 'LV-1': self.cli_data.fake_lv_id[0], + 'LV-2': self.cli_data.fake_lv_id[1], + } + + @mock.patch.object( + common_cli.InfortrendCommon, '_init_raidcmd', mock.Mock()) + @mock.patch.object( + common_cli.InfortrendCommon, '_init_raid_connection', mock.Mock()) + @mock.patch.object( + common_cli.InfortrendCommon, '_set_raidcmd', mock.Mock()) + def _get_driver(self, conf): + driver = common_cli.InfortrendCommon('iSCSI', configuration=conf) + driver.do_setup() + driver.pool_dict = self.pool_dict + return driver + + @mock.patch.object(common_cli.LOG, 'warning') + def test_create_map_warning_return_code(self, log_warning): + + FAKE_RETURN_CODE = (20, '') + mock_commands = { + 'CreateMap': FAKE_RETURN_CODE, + } + self._driver_setup(mock_commands) + + self.driver._execute('CreateMap') + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_delete_map_warning_return_code(self, log_warning): + + FAKE_RETURN_CODE = (11, '') + mock_commands = { + 'DeleteMap': FAKE_RETURN_CODE, + } + self._driver_setup(mock_commands) + + self.driver._execute('DeleteMap') + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_create_iqn_warning_return_code(self, log_warning): + + FAKE_RETURN_CODE = (20, '') + mock_commands = { + 'CreateIQN': FAKE_RETURN_CODE, + } + self._driver_setup(mock_commands) + + self.driver._execute('CreateIQN') + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_delete_iqn_warning_return_code_has_map(self, log_warning): + + FAKE_RETURN_CODE = (20, '') + mock_commands = { + 'DeleteIQN': FAKE_RETURN_CODE, + } + self._driver_setup(mock_commands) + + 
self.driver._execute('DeleteIQN') + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_delete_iqn_warning_return_code_no_such_name(self, log_warning): + + FAKE_RETURN_CODE = (11, '') + mock_commands = { + 'DeleteIQN': FAKE_RETURN_CODE, + } + self._driver_setup(mock_commands) + + self.driver._execute('DeleteIQN') + self.assertEqual(1, log_warning.call_count) + + def test_normal_channel(self): + + test_map_dict = { + 'slot_a': {'1': [], '2': [], '4': []}, + 'slot_b': {}, + } + test_target_dict = { + 'slot_a': {'1': '0', '2': '0', '4': '0'}, + 'slot_b': {}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + } + self._driver_setup(mock_commands) + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + def test_normal_channel_with_multipath(self): + + test_map_dict = { + 'slot_a': {'1': [], '2': [], '4': []}, + 'slot_b': {'1': [], '2': [], '4': []}, + } + test_target_dict = { + 'slot_a': {'1': '0', '2': '0', '4': '0'}, + 'slot_b': {'1': '1', '2': '1', '4': '1'}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), + } + self._driver_setup(mock_commands) + + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + def test_specific_channel(self): + + configuration = copy.copy(self.configuration) + configuration.infortrend_slots_a_channels_id = '2, 4' + + test_map_dict = { + 'slot_a': {'2': [], '4': []}, + 'slot_b': {}, + } + test_target_dict = { + 'slot_a': {'2': '0', '4': '0'}, + 'slot_b': {}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + } + self._driver_setup(mock_commands, configuration) + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + def test_update_mcs_dict(self): + + configuration = copy.copy(self.configuration) + configuration.use_multipath_for_image_xfer = True + + test_mcs_dict = { + 'slot_a': {'1': ['1', '2'], '2': ['4']}, + 'slot_b': {}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), + } + + self._driver_setup(mock_commands, configuration) + self.driver._init_map_info() + + self.assertDictEqual(test_mcs_dict, self.driver.mcs_dict) + + def test_mapping_info_with_mpio_no_mcs(self): + + configuration = copy.copy(self.configuration) + configuration.use_multipath_for_image_xfer = True + + fake_mcs_dict = { + 'slot_a': {'1': ['1'], '2': ['2'], '4': ['4']}, + 'slot_b': {'1': ['1'], '2': ['2'], '4': ['4']}, + } + lun_list = list(range(0, 127)) + fake_map_dict = { + 'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]}, + 'slot_b': {'1': lun_list[:], '2': lun_list[:], '4': lun_list[:]}, + } + + test_map_chl = { + 'slot_a': ['1', '2', '4'], + 'slot_b': ['1', '2', '4'], + } + test_map_lun = ['2'] + self.driver = self._get_driver(configuration) + self.driver.mcs_dict = fake_mcs_dict + self.driver.map_dict = fake_map_dict + + map_chl, map_lun = self.driver._get_mapping_info_with_mpio() + + map_chl['slot_a'].sort() + map_chl['slot_b'].sort() + + self.assertDictEqual(test_map_chl, map_chl) + self.assertEqual(test_map_lun, map_lun) + + def test_mapping_info_with_mcs(self): + + configuration = copy.copy(self.configuration) + configuration.use_multipath_for_image_xfer = True 
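In the MPIO/MCS mapping tests in this class, the per-channel lists in map_dict read as LUN ids that are still free on that channel, and the expected test_map_lun is the smallest id free on every channel being used (hence ['2']). A rough standalone sketch of that selection, under that assumption and not the driver's actual code:

    # Sketch: lowest LUN id still free on every channel we plan to map through.
    def pick_common_lun(map_dict, channels_per_slot):
        candidate_sets = []
        for slot, channels in channels_per_slot.items():
            for ch in channels:
                candidate_sets.append(set(map_dict[slot][ch]))
        common = set.intersection(*candidate_sets)
        return min(common) if common else None

    lun_list = list(range(0, 127))
    sample_map_dict = {
        'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]},
        'slot_b': {'1': lun_list[:], '2': lun_list[:], '4': lun_list[:]},
    }
    print(pick_common_lun(sample_map_dict,
                          {'slot_a': ['1', '2', '4'],
                           'slot_b': ['1', '2', '4']}))  # -> 2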
+ + fake_mcs_dict = { + 'slot_a': {'0': ['1', '2'], '2': ['4']}, + 'slot_b': {'0': ['1', '2']}, + } + lun_list = list(range(0, 127)) + fake_map_dict = { + 'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]}, + 'slot_b': {'1': lun_list[:], '2': lun_list[:]}, + } + + test_map_chl = { + 'slot_a': ['1', '4'], + 'slot_b': ['1'], + } + test_map_lun = ['2'] + self.driver = self._get_driver(configuration) + self.driver.mcs_dict = fake_mcs_dict + self.driver.map_dict = fake_map_dict + + map_chl, map_lun = self.driver._get_mapping_info_with_mpio() + + map_chl['slot_a'].sort() + map_chl['slot_b'].sort() + + self.assertDictEqual(test_map_chl, map_chl) + self.assertEqual(test_map_lun, map_lun) + + def test_mapping_info_with_mcs_multi_group(self): + + configuration = copy.copy(self.configuration) + configuration.use_multipath_for_image_xfer = True + + fake_mcs_dict = { + 'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']}, + 'slot_b': {'0': ['1', '2']}, + } + lun_list = list(range(0, 127)) + fake_map_dict = { + 'slot_a': { + '1': lun_list[2:], + '2': lun_list[3:], + '3': lun_list[:], + '4': lun_list[1:], + '5': lun_list[:], + }, + 'slot_b': { + '1': lun_list[:], + '2': lun_list[:], + }, + } + + test_map_chl = { + 'slot_a': ['1', '3', '5'], + 'slot_b': ['1'], + } + test_map_lun = ['2'] + self.driver = self._get_driver(configuration) + self.driver.mcs_dict = fake_mcs_dict + self.driver.map_dict = fake_map_dict + + map_chl, map_lun = self.driver._get_mapping_info_with_mpio() + + map_chl['slot_a'].sort() + map_chl['slot_b'].sort() + + self.assertDictEqual(test_map_chl, map_chl) + self.assertEqual(test_map_lun, map_lun) + + def test_specific_channel_with_multipath(self): + + configuration = copy.copy(self.configuration) + configuration.infortrend_slots_a_channels_id = '1,2' + + test_map_dict = { + 'slot_a': {'1': [], '2': []}, + 'slot_b': {}, + } + test_target_dict = { + 'slot_a': {'1': '0', '2': '0'}, + 'slot_b': {}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + } + self._driver_setup(mock_commands, configuration) + + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + def test_specific_channel_with_multipath_r_model(self): + + configuration = copy.copy(self.configuration) + configuration.infortrend_slots_a_channels_id = '1,2' + configuration.infortrend_slots_b_channels_id = '1' + + test_map_dict = { + 'slot_a': {'1': [], '2': []}, + 'slot_b': {'1': []}, + } + test_target_dict = { + 'slot_a': {'1': '0', '2': '0'}, + 'slot_b': {'1': '1'}, + } + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), + } + self._driver_setup(mock_commands, configuration) + self.driver._init_map_info() + + self.assertDictEqual(test_map_dict, self.driver.map_dict) + self.assertDictEqual(test_target_dict, self.driver.target_dict) + + @mock.patch.object(common_cli.LOG, 'info') + def test_create_volume(self, log_info): + + test_volume = self.cli_data.test_volume + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + self.cli_data.fake_partition_id[0], + int(self.cli_data.fake_system_id[0], 16) + ) + } + + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'ShowLV': self._mock_show_lv, + } + self._driver_setup(mock_commands) + + model_update = self.driver.create_volume(test_volume) + + 
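A note on the provider_location strings asserted throughout these cases: they encode the backend location as key^value pairs joined by '@', e.g. 'partition_id^<partition>@system_id^<decimal system id>', where the decimal system id is converted from the hex controller id (int('DEEC', 16) == 57068). A small illustrative parser for that shape, not the driver's actual implementation:

    def parse_provider_location(provider_location):
        # Split 'key^value@key^value' into a dict; illustration only.
        info = {}
        for token in provider_location.split('@'):
            key, _, value = token.partition('^')
            info[key] = value
        return info

    loc = 'partition_id^6A41315B0EDC8EB7@system_id^%s' % int('DEEC', 16)
    print(parse_provider_location(loc))
    # {'partition_id': '6A41315B0EDC8EB7', 'system_id': '57068'}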
self.assertDictEqual(test_model_update, model_update) + self.assertEqual(1, log_info.call_count) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_create_volume_with_create_fail(self): + test_volume = self.cli_data.test_volume + + mock_commands = { + 'CreatePartition': FAKE_ERROR_RETURN, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'ShowLV': self._mock_show_lv, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.create_volume, + test_volume) + + @mock.patch.object(common_cli.LOG, 'info') + def test_delete_volume_with_mapped(self, log_info): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + + mock_commands = { + 'ShowPartition': + self.cli_data.get_test_show_partition_detail_for_map( + test_partition_id), + 'DeleteMap': SUCCEED, + 'DeletePartition': SUCCEED, + } + self._driver_setup(mock_commands) + + self.driver.delete_volume(test_volume) + + expect_cli_cmd = [ + mock.call('ShowPartition', '-l'), + mock.call('DeleteMap', 'part', test_partition_id, '-y'), + mock.call('DeletePartition', test_partition_id, '-y'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(1, log_info.call_count) + + @mock.patch.object(common_cli.LOG, 'info') + def test_delete_volume_without_mapped(self, log_info): + + test_volume = self.cli_data.test_volume_1 + test_partition_id = self.cli_data.fake_partition_id[1] + + mock_commands = { + 'ShowPartition': + self.cli_data.get_test_show_partition_detail( + test_volume['id'], '5DE94FF775D81C30'), + 'DeletePartition': SUCCEED, + } + self._driver_setup(mock_commands) + self.driver.delete_volume(test_volume) + + expect_cli_cmd = [ + mock.call('ShowPartition', '-l'), + mock.call('DeletePartition', test_partition_id, '-y'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(1, log_info.call_count) + + def test_delete_volume_with_delete_fail(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + + mock_commands = { + 'ShowPartition': + self.cli_data.get_test_show_partition_detail_for_map( + test_partition_id), + 'ShowReplica': self.cli_data.get_test_show_replica_detail(), + 'DeleteReplica': SUCCEED, + 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), + 'DeleteSnapshot': SUCCEED, + 'ShowMap': self.cli_data.get_test_show_map(), + 'DeleteMap': SUCCEED, + 'DeletePartition': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.delete_volume, + test_volume) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_delete_volume_with_partiton_not_found(self, log_warning): + + test_volume = self.cli_data.test_volume + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_empty_list(), + } + self._driver_setup(mock_commands) + + self.driver.delete_volume(test_volume) + + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'info') + def test_delete_volume_without_provider(self, log_info): + + test_system_id = self.cli_data.fake_system_id[0] + test_volume = copy.deepcopy(self.cli_data.test_volume) + test_volume['provider_location'] = 'partition_id^%s@system_id^%s' % ( + 'None', int(test_system_id, 16)) + test_partition_id = self.cli_data.fake_partition_id[0] + + mock_commands = { + 'ShowPartition': + self.cli_data.get_test_show_partition_detail_for_map( + 
test_partition_id), + 'ShowReplica': self.cli_data.get_test_show_replica_detail(), + 'DeleteReplica': SUCCEED, + 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), + 'DeleteSnapshot': SUCCEED, + 'ShowMap': self.cli_data.get_test_show_map(), + 'DeleteMap': SUCCEED, + 'DeletePartition': SUCCEED, + } + self._driver_setup(mock_commands) + + self.driver.delete_volume(test_volume) + + self.assertEqual(1, log_info.call_count) + + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + @mock.patch.object(common_cli.LOG, 'info') + def test_create_cloned_volume(self, log_info): + + fake_partition_id = self.cli_data.fake_partition_id[0] + test_dst_volume = self.cli_data.test_dst_volume + test_dst_volume_id = test_dst_volume['id'] + test_src_volume = self.cli_data.test_volume + test_dst_part_id = self.cli_data.fake_partition_id[1] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + self.cli_data.fake_partition_id[1], + int(self.cli_data.fake_system_id[0], 16) + ) + } + + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'CreateReplica': SUCCEED, + 'ShowLV': self._mock_show_lv, + 'ShowReplica': + self.cli_data.get_test_show_replica_detail_for_migrate( + fake_partition_id, test_dst_part_id, test_dst_volume_id), + 'DeleteReplica': SUCCEED, + } + self._driver_setup(mock_commands) + + model_update = self.driver.create_cloned_volume( + test_dst_volume, test_src_volume) + + self.assertDictEqual(test_model_update, model_update) + self.assertEqual(1, log_info.call_count) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_create_cloned_volume_with_create_replica_fail(self): + + test_dst_volume = self.cli_data.test_dst_volume + test_src_volume = self.cli_data.test_volume + + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'CreateReplica': FAKE_ERROR_RETURN, + 'ShowLV': self._mock_show_lv, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.create_cloned_volume, + test_dst_volume, + test_src_volume) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_create_export(self): + + test_volume = self.cli_data.test_volume + test_model_update = { + 'provider_location': test_volume['provider_location'], + } + self.driver = self._get_driver(self.configuration) + + model_update = self.driver.create_export(None, test_volume) + + self.assertDictEqual(test_model_update, model_update) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_get_volume_stats_full(self): + + test_volume_states = self.cli_data.test_volume_states_full + + mock_commands = { + 'InitCache': SUCCEED, + 'ShowLicense': self.cli_data.get_test_show_license_full(), + 'ShowLV': [self.cli_data.get_test_show_lv_tier(), + self.cli_data.get_test_show_lv()], + 'ShowDevice': self.cli_data.get_test_show_device(), + 'CheckConnection': SUCCEED, + } + self._driver_setup(mock_commands) + self.driver.VERSION = '99.99' + self.driver.system_id = self.cli_data.fake_system_id[0] + + volume_states = self.driver.get_volume_stats(True) + + self.assertDictEqual.__self__.maxDiff = None + self.assertDictEqual(test_volume_states, volume_states) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_get_volume_stats_thin(self): + + 
test_volume_states = self.cli_data.test_volume_states_thin + + mock_commands = { + 'InitCache': SUCCEED, + 'ShowLicense': self.cli_data.get_test_show_license_thin(), + 'ShowLV': [self.cli_data.get_test_show_lv_tier(), + self.cli_data.get_test_show_lv()], + 'ShowPartition': self.cli_data.get_test_show_partition_detail(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'CheckConnection': SUCCEED, + } + self._driver_setup(mock_commands) + self.driver.VERSION = '99.99' + self.driver.system_id = self.cli_data.fake_system_id[0] + + volume_states = self.driver.get_volume_stats(True) + + self.assertDictEqual.__self__.maxDiff = None + self.assertDictEqual(test_volume_states, volume_states) + + def test_get_volume_stats_fail(self): + + mock_commands = { + 'InitCache': SUCCEED, + 'ShowLicense': self.cli_data.get_test_show_license_thin(), + 'ShowLV': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.get_volume_stats) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_create_snapshot(self): + + fake_partition_id = self.cli_data.fake_partition_id[0] + fake_snapshot_id = self.cli_data.fake_snapshot_id[0] + + mock_commands = { + 'CreateSnapshot': SUCCEED, + 'ShowSnapshot': self.cli_data.get_test_show_snapshot( + partition_id=fake_partition_id, + snapshot_id=fake_snapshot_id), + 'ShowPartition': self.cli_data.get_test_show_partition(), + } + self._driver_setup(mock_commands) + + model_update = self.driver.create_snapshot(self.cli_data.test_snapshot) + + self.assertEqual(fake_snapshot_id, model_update['provider_location']) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_create_snapshot_without_partition_id(self): + + fake_partition_id = self.cli_data.fake_partition_id[0] + fake_snapshot_id = self.cli_data.fake_snapshot_id[0] + test_snapshot = self.cli_data.test_snapshot + + mock_commands = { + 'CreateSnapshot': SUCCEED, + 'ShowSnapshot': self.cli_data.get_test_show_snapshot( + partition_id=fake_partition_id, + snapshot_id=fake_snapshot_id), + 'ShowPartition': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.create_snapshot, + test_snapshot) + + def test_create_snapshot_with_create_fail(self): + + fake_partition_id = self.cli_data.fake_partition_id[0] + fake_snapshot_id = self.cli_data.fake_snapshot_id[0] + test_snapshot = self.cli_data.test_snapshot + + mock_commands = { + 'CreateSnapshot': FAKE_ERROR_RETURN, + 'ShowSnapshot': self.cli_data.get_test_show_snapshot( + partition_id=fake_partition_id, + snapshot_id=fake_snapshot_id), + 'ShowPartition': self.cli_data.get_test_show_partition(), + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.create_snapshot, + test_snapshot) + + def test_create_snapshot_with_show_fail(self): + + test_snapshot = self.cli_data.test_snapshot + + mock_commands = { + 'CreateSnapshot': SUCCEED, + 'ShowSnapshot': FAKE_ERROR_RETURN, + 'ShowPartition': self.cli_data.get_test_show_partition(), + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.create_snapshot, + test_snapshot) + + @mock.patch.object(common_cli.LOG, 'info') + def test_delete_snapshot(self, log_info): + + test_snapshot = self.cli_data.test_snapshot + + mock_commands = { + 'ShowReplica': self.cli_data.get_test_show_replica_detail(), + 'DeleteSnapshot': SUCCEED, + } + 
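The mock_commands dicts used in every case map a CLI command name to either a fixed (rc, output) tuple, a list of results consumed one per call, or a callable, and _mock_command_execute (defined near the top of this test file) dispatches on that type. A self-contained sketch of the same dispatch, with made-up results:

    SUCCEED = (0, '')

    def make_fake_execute(mock_commands):
        # list -> consume in order, tuple -> fixed result,
        # callable -> delegate, anything else -> error tuple.
        def fake_execute_command(cli_type, *args, **kwargs):
            handler = mock_commands.get(cli_type)
            if isinstance(handler, list):
                return handler.pop(0)
            if isinstance(handler, tuple):
                return handler
            if callable(handler):
                return handler(*args, **kwargs)
            return (-1, '')
        return fake_execute_command

    fake = make_fake_execute({
        'ShowMap': [(0, 'first call'), (0, 'second call')],
        'DeleteMap': SUCCEED,
    })
    print(fake('ShowMap'))    # (0, 'first call')
    print(fake('ShowMap'))    # (0, 'second call')
    print(fake('DeleteMap'))  # (0, '')
    print(fake('Unknown'))    # (-1, '')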
self._driver_setup(mock_commands) + + self.driver.delete_snapshot(test_snapshot) + + self.assertEqual(1, log_info.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_delete_snapshot_without_provider_location(self, log_warning): + + test_snapshot = self.cli_data.test_snapshot_without_provider_location + + self.driver = self._get_driver(self.configuration) + self.driver.delete_snapshot(test_snapshot) + + self.assertEqual(1, log_warning.call_count) + + def test_delete_snapshot_with_fail(self): + + test_snapshot = self.cli_data.test_snapshot + + mock_commands = { + 'DeleteSnapshot': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.delete_snapshot, + test_snapshot) + + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + @mock.patch.object(common_cli.LOG, 'info') + def test_create_volume_from_snapshot(self, log_info): + + test_snapshot = self.cli_data.test_snapshot + test_snapshot_id = self.cli_data.fake_snapshot_id[0] + test_dst_volume = self.cli_data.test_dst_volume + test_dst_volume_id = test_dst_volume['id'] + test_dst_part_id = self.cli_data.fake_partition_id[1] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + self.cli_data.fake_partition_id[1], + int(self.cli_data.fake_system_id[0], 16) + ) + } + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'CreateReplica': SUCCEED, + 'ShowReplica': + self.cli_data.get_test_show_replica_detail_for_migrate( + test_snapshot_id, test_dst_part_id, test_dst_volume_id), + 'DeleteReplica': SUCCEED, + } + self._driver_setup(mock_commands) + + model_update = self.driver.create_volume_from_snapshot( + test_dst_volume, test_snapshot) + + self.assertDictEqual(test_model_update, model_update) + self.assertEqual(1, log_info.call_count) + + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + @mock.patch.object(common_cli.LOG, 'info') + def test_create_volume_from_snapshot_with_different_size(self, log_info): + + test_snapshot = self.cli_data.test_snapshot + test_snapshot_id = self.cli_data.fake_snapshot_id[0] + test_dst_volume = self.cli_data.test_dst_volume + test_dst_volume['size'] = 10 + test_dst_volume_id = test_dst_volume['id'].replace('-', '') + test_dst_part_id = self.cli_data.fake_partition_id[1] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + self.cli_data.fake_partition_id[1], + int(self.cli_data.fake_system_id[0], 16)) + } + mock_commands = { + 'ShowSnapshot': + self.cli_data.get_test_show_snapshot_detail_filled_block(), + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'ShowDevice': self.cli_data.get_test_show_device(), + 'CreateReplica': SUCCEED, + 'ShowLV': self._mock_show_lv, + 'ShowReplica': + self.cli_data.get_test_show_replica_detail_for_migrate( + test_snapshot_id, test_dst_part_id, test_dst_volume_id), + 'DeleteReplica': SUCCEED, + } + self._driver_setup(mock_commands) + + model_update = self.driver.create_volume_from_snapshot( + test_dst_volume, test_snapshot) + self.assertDictEqual(test_model_update, model_update) + self.assertEqual(1, log_info.call_count) + self.assertEqual(10, test_dst_volume['size']) + + def test_create_volume_from_snapshot_without_provider_location( + self): + + test_snapshot = 
self.cli_data.test_snapshot_without_provider_location + test_dst_volume = self.cli_data.test_dst_volume + + self.driver = self._get_driver(self.configuration) + + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + test_dst_volume, + test_snapshot) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) + test_iscsi_properties = self.cli_data.test_iscsi_properties + test_target_protal = [test_iscsi_properties['data']['target_portal']] + test_target_iqn = [test_iscsi_properties['data']['target_iqn']] + + test_connector['multipath'] = False + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateMap': SUCCEED, + 'ShowNet': self.cli_data.get_test_show_net(), + 'ExecuteCommand': self.cli_data.get_fake_discovery( + test_target_iqn, test_target_protal), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual(test_iscsi_properties, properties) + + expect_cli_cmd = [ + mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', + 'iqn=%s' % test_connector['initiator']), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_iqn_not_exist(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1]) + test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) + test_iscsi_properties = self.cli_data.test_iscsi_properties + test_target_protal = [test_iscsi_properties['data']['target_portal']] + test_target_iqn = [test_iscsi_properties['data']['target_iqn']] + + test_connector['multipath'] = False + test_connector['initiator'] = test_initiator + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateIQN': SUCCEED, + 'CreateMap': SUCCEED, + 'ShowNet': self.cli_data.get_test_show_net(), + 'ExecuteCommand': self.cli_data.get_fake_discovery( + test_target_iqn, test_target_protal), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual(test_iscsi_properties, properties) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowChannel'), + mock.call('ShowIQN'), + mock.call('CreateIQN', test_initiator, test_initiator[-16:]), + mock.call('ShowNet'), + mock.call('ShowMap'), + mock.call('ShowMap', 'part=6A41315B0EDC8EB7'), + mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', + 'iqn=%s' % test_connector['initiator']), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_empty_map(self): + + test_volume = self.cli_data.test_volume + test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) + test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map + 
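The 'ExecuteCommand' entries in these iSCSI cases return get_fake_discovery output, one '<portal>,<tpgt> <target_iqn>' line per target, in the style of iSCSI sendtargets discovery. A tiny illustrative parser for that line format, using made-up portal and IQN values:

    def parse_discovery(output):
        # Turn '<ip:port>,<tpgt> <iqn>' lines into (portal, iqn) pairs.
        entries = []
        for line in output.strip().splitlines():
            portal_part, iqn = line.split(' ', 1)
            portal = portal_part.split(',', 1)[0]
            entries.append((portal, iqn))
        return entries

    sample = ('172.27.0.1:3260,1 iqn.2002-10.com.example:target-0\n'
              '172.27.0.2:3260,1 iqn.2002-10.com.example:target-1')
    print(parse_discovery(sample))
    # [('172.27.0.1:3260', 'iqn.2002-10.com.example:target-0'),
    #  ('172.27.0.2:3260', 'iqn.2002-10.com.example:target-1')]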
test_target_protal = [test_iscsi_properties['data']['target_portal']] + test_target_iqn = [test_iscsi_properties['data']['target_iqn']] + + test_connector['multipath'] = False + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_empty_list(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateMap': SUCCEED, + 'ShowNet': self.cli_data.get_test_show_net(), + 'ExecuteCommand': self.cli_data.get_fake_discovery( + test_target_iqn, test_target_protal), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual( + self.cli_data.test_iscsi_properties_empty_map, properties) + + def test_initialize_connection_with_create_map_fail(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_iscsi + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateMap': FAKE_ERROR_RETURN, + 'ShowNet': SUCCEED, + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.initialize_connection, + test_volume, + test_connector) + + def test_initialize_connection_with_get_ip_fail(self): + + test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_iscsi + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateMap': SUCCEED, + 'ShowNet': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.initialize_connection, + test_volume, + test_connector) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_mcs(self): + + configuration = copy.copy(self.configuration) + + test_volume = self.cli_data.test_volume_1 + test_partition_id = self.cli_data.fake_partition_id[1] + test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi_1) + test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs_1 + test_target_portal = [test_iscsi_properties['data']['target_portal']] + test_target_iqn = [test_iscsi_properties['data']['target_iqn']] + + test_connector['multipath'] = False + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateIQN': SUCCEED, + 'CreateMap': SUCCEED, + 'ShowNet': self.cli_data.get_test_show_net(), + 'ExecuteCommand': self.cli_data.get_fake_discovery( + test_target_iqn, test_target_portal), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands, configuration) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual(test_iscsi_properties, properties) + + expect_cli_cmd = [ + mock.call('CreateMap', 'part', test_partition_id, '4', '0', '1', + 'iqn=%s' % test_connector['initiator']), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_initialize_connection_with_exist_map(self): + + configuration = copy.copy(self.configuration) + + test_volume = 
self.cli_data.test_volume + test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) + test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs + test_target_portal = [test_iscsi_properties['data']['target_portal']] + test_target_iqn = [test_iscsi_properties['data']['target_iqn']] + + test_connector['multipath'] = False + + mock_commands = { + 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), + 'ShowMap': self.cli_data.get_test_show_map(), + 'ShowIQN': self.cli_data.get_test_show_iqn(), + 'CreateMap': SUCCEED, + 'ShowNet': self.cli_data.get_test_show_net(), + 'ExecuteCommand': self.cli_data.get_fake_discovery( + test_target_iqn, test_target_portal), + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands, configuration) + + properties = self.driver.initialize_connection( + test_volume, test_connector) + + self.assertDictEqual(test_iscsi_properties, properties) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_extend_volume(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_new_size = 10 + test_expand_size = test_new_size - test_volume['size'] + + mock_commands = { + 'SetPartition': SUCCEED, + } + self._driver_setup(mock_commands) + + self.driver.extend_volume(test_volume, test_new_size) + + expect_cli_cmd = [ + mock.call('SetPartition', 'expand', test_partition_id, + 'size=%sGB' % test_expand_size), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_extend_volume_mb(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_new_size = 5.5 + test_expand_size = round((test_new_size - test_volume['size']) * 1024) + + mock_commands = { + 'SetPartition': SUCCEED, + } + self._driver_setup(mock_commands) + + self.driver.extend_volume(test_volume, test_new_size) + + expect_cli_cmd = [ + mock.call('SetPartition', 'expand', test_partition_id, + 'size=%sMB' % test_expand_size), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + def test_extend_volume_fail(self): + + test_volume = self.cli_data.test_volume + test_new_size = 10 + + mock_commands = { + 'SetPartition': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.extend_volume, + test_volume, + test_new_size) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_terminate_connection(self): + + test_volume = self.cli_data.test_volume + test_partition_id = self.cli_data.fake_partition_id[0] + test_connector = self.cli_data.test_connector_iscsi + + mock_commands = { + 'DeleteMap': SUCCEED, + 'ShowMap': [self.cli_data.get_test_show_map(), + self.cli_data.get_test_show_empty_list()], + 'DeleteIQN': SUCCEED, + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + self.driver.terminate_connection(test_volume, test_connector) + + expect_cli_cmd = [ + mock.call('ShowDevice'), + mock.call('ShowMap', 'part=%s' % test_partition_id), + mock.call('DeleteMap', + 'part', test_partition_id, '1', '0', '0', '-y'), + mock.call('DeleteMap', + 'part', test_partition_id, '1', '0', '1', '-y'), + mock.call('DeleteMap', + 'part', test_partition_id, '4', '0', '0', '-y'), + mock.call('ShowMap'), + mock.call('DeleteIQN', test_connector['initiator'][-16:]), + ] + self._assert_cli_has_calls(expect_cli_cmd) + + def test_terminate_connection_fail(self): + + 
test_volume = self.cli_data.test_volume + test_connector = self.cli_data.test_connector_iscsi + + mock_commands = { + 'DeleteMap': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.terminate_connection, + test_volume, + test_connector) + + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + def test_migrate_volume(self): + + test_host = copy.deepcopy(self.cli_data.test_migrate_host) + fake_pool = copy.deepcopy(self.cli_data.fake_pool) + test_volume = self.cli_data.test_volume + test_volume_id = test_volume['id'] + test_src_part_id = self.cli_data.fake_partition_id[0] + test_dst_part_id = self.cli_data.fake_partition_id[2] + test_pair_id = self.cli_data.fake_pair_id[0] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + test_dst_part_id, + int(self.cli_data.fake_system_id[0], 16) + ) + } + + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition( + test_volume_id, fake_pool['pool_id']), + 'CreateReplica': SUCCEED, + 'ShowReplica': + self.cli_data.get_test_show_replica_detail_for_migrate( + test_src_part_id, test_dst_part_id, test_volume_id), + 'DeleteReplica': SUCCEED, + 'DeleteMap': SUCCEED, + 'DeletePartition': SUCCEED, + } + self._driver_setup(mock_commands) + self.driver.system_id = 'DEEC' + + rc, model_update = self.driver.migrate_volume(test_volume, test_host) + + expect_cli_cmd = [ + mock.call('CreatePartition', + fake_pool['pool_id'], + test_volume['id'], + 'size=%s' % (test_volume['size'] * 1024), + ''), + mock.call('ShowPartition'), + mock.call('CreateReplica', + 'Cinder-Migrate', + 'part', test_src_part_id, + 'part', test_dst_part_id, + 'type=mirror'), + mock.call('ShowReplica', '-l'), + mock.call('DeleteReplica', test_pair_id, '-y'), + mock.call('DeleteMap', 'part', test_src_part_id, '-y'), + mock.call('DeletePartition', test_src_part_id, '-y'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertTrue(rc) + self.assertDictEqual(test_model_update, model_update) + + @mock.patch.object(common_cli.LOG, 'error') + def test_migrate_volume_with_invalid_storage(self, log_error): + + fake_host = self.cli_data.fake_host + test_volume = self.cli_data.test_volume + + mock_commands = { + 'ShowLV': self._mock_show_lv_for_migrate, + } + self._driver_setup(mock_commands) + + rc, model_update = self.driver.migrate_volume(test_volume, fake_host) + + self.assertFalse(rc) + self.assertIsNone(model_update) + self.assertEqual(1, log_error.call_count) + + def test_migrate_volume_with_get_part_id_fail(self): + + test_host = copy.deepcopy(self.cli_data.test_migrate_host) + test_volume = self.cli_data.test_volume + + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition(), + 'DeleteMap': SUCCEED, + 'CreateReplica': SUCCEED, + 'CreateMap': SUCCEED, + 'ShowLV': self._mock_show_lv_for_migrate, + } + self._driver_setup(mock_commands) + self.driver.system_id = 'DEEC' + + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver.migrate_volume, + test_volume, + test_host) + + def test_migrate_volume_with_create_replica_fail(self): + + test_host = copy.deepcopy(self.cli_data.test_migrate_host) + fake_pool = copy.deepcopy(self.cli_data.fake_pool) + test_volume = self.cli_data.test_volume + + mock_commands = { + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition( + test_volume['id'], 
fake_pool['pool_id']), + 'DeleteMap': SUCCEED, + 'CreateReplica': FAKE_ERROR_RETURN, + 'CreateMap': SUCCEED, + 'ShowLV': self._mock_show_lv_for_migrate, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.migrate_volume, + test_volume, + test_host) + + def test_manage_existing_get_size(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_id + test_pool = self.cli_data.fake_lv_id[0] + test_ref_volume_id = test_ref_volume['source-id'] + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), + 'ShowMap': SUCCEED, + } + + self._driver_setup(mock_commands) + + size = self.driver.manage_existing_get_size( + test_volume, test_ref_volume) + + expect_cli_cmd = [ + mock.call('ShowPartition', '-l'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(20, size) + + def test_manage_existing_get_size_with_name(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_name + test_pool = self.cli_data.fake_lv_id[0] + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + test_ref_volume['source-name'], test_pool), + 'ShowMap': SUCCEED, + } + + self._driver_setup(mock_commands) + + size = self.driver.manage_existing_get_size( + test_volume, test_ref_volume) + + expect_cli_cmd = [ + mock.call('ShowPartition', '-l'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(20, size) + + def test_manage_existing_get_size_in_use(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume + test_pool = self.cli_data.fake_lv_id[0] + test_ref_volume_id = test_ref_volume['source-id'] + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), + } + self._driver_setup(mock_commands) + + self.assertRaises( + exception.VolumeDriverException, + self.driver.manage_existing_get_size, + test_volume, + test_ref_volume) + + def test_manage_existing_get_size_no_source_id(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_dst_volume + self.driver = self._get_driver(self.configuration) + + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + test_volume, + test_ref_volume) + + def test_manage_existing_get_size_show_part_fail(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_id + + mock_commands = { + 'ShowPartition': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.manage_existing_get_size, + test_volume, + test_ref_volume) + + def test_manage_existing_get_size_with_not_exist(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_id + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail(), + } + self._driver_setup(mock_commands) + + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + test_volume, + test_ref_volume) + + @mock.patch.object(common_cli.LOG, 'info') + def test_manage_existing(self, log_info): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_id + test_pool = 
self.cli_data.fake_lv_id[0] + test_partition_id = self.cli_data.test_dst_volume['id'] + test_ref_volume_id = test_ref_volume['source-id'] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + test_partition_id, + int(self.cli_data.fake_system_id[0], 16) + ) + } + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), + 'SetPartition': SUCCEED, + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + model_update = self.driver.manage_existing( + test_volume, test_ref_volume) + + expect_cli_cmd = [ + mock.call('ShowPartition', '-l'), + mock.call('SetPartition', test_partition_id, + 'name=%s' % test_volume['id']), + mock.call('ShowDevice'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(1, log_info.call_count) + self.assertDictEqual(test_model_update, model_update) + + def test_manage_existing_rename_fail(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_id + test_pool = self.cli_data.fake_lv_id[0] + test_ref_volume_id = test_ref_volume['source-id'] + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), + 'SetPartition': FAKE_ERROR_RETURN, + } + self._driver_setup(mock_commands) + + self.assertRaises( + common_cli.InfortrendCliException, + self.driver.manage_existing, + test_volume, + test_ref_volume) + + def test_manage_existing_with_part_not_found(self): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_id + + mock_commands = { + 'ShowPartition': + self.cli_data.get_test_show_partition_detail(), + 'SetPartition': SUCCEED, + } + self._driver_setup(mock_commands) + + self.assertRaises( + exception.ManageExistingInvalidReference, + self.driver.manage_existing, + test_volume, + test_ref_volume) + + @mock.patch.object(common_cli.LOG, 'info') + def test_manage_existing_with_import(self, log_info): + + test_volume = self.cli_data.test_volume + test_ref_volume = self.cli_data.test_ref_volume_with_name + test_pool = self.cli_data.fake_lv_id[0] + test_partition_id = self.cli_data.fake_partition_id[2] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + test_partition_id, + int(self.cli_data.fake_system_id[0], 16) + ) + } + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + test_ref_volume['source-name'], test_pool), + 'SetPartition': SUCCEED, + 'ShowDevice': self.cli_data.get_test_show_device(), + } + self._driver_setup(mock_commands) + + model_update = self.driver.manage_existing( + test_volume, test_ref_volume) + + expect_cli_cmd = [ + mock.call('SetPartition', test_partition_id, + 'name=%s' % test_volume['id']), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(1, log_info.call_count) + self.assertDictEqual(test_model_update, model_update) + + @mock.patch.object(common_cli.LOG, 'info') + def test_unmanage(self, log_info): + + test_volume = self.cli_data.test_volume + test_volume_id = test_volume['id'] + test_partition_id = self.cli_data.fake_partition_id[0] + + mock_commands = { + 'SetPartition': SUCCEED, + } + self._driver_setup(mock_commands) + + self.driver.unmanage(test_volume) + + expect_cli_cmd = [ + mock.call( + 'SetPartition', + test_partition_id, + 'name=cinder-unmanaged-%s' % test_volume_id[:-17]), + ] + 
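# unmanage only renames the partition to 'cinder-unmanaged-<id>'; the backend partition itself is kept. + 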
self._assert_cli_has_calls(expect_cli_cmd) + self.assertEqual(1, log_info.call_count) + + @mock.patch.object(common_cli.LOG, 'info') + def test_retype_without_change(self, log_info): + + test_volume = self.cli_data.test_volume + test_new_type = self.cli_data.test_new_type + test_diff = {'extra_specs': {}} + test_host = self.cli_data.test_migrate_host_2 + + self.driver = self._get_driver(self.configuration) + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertTrue(rc) + self.assertEqual(1, log_info.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_retype_with_change_global_provision(self, log_warning): + + test_volume = self.cli_data.test_volume + test_new_type = self.cli_data.test_new_type + test_diff = self.cli_data.test_diff + test_host = self.cli_data.test_migrate_host_2 + + self.driver = self._get_driver(self.configuration) + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertFalse(rc) + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_retype_with_change_individual_provision(self, log_warning): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'LV-1:thin', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:provisioning': ('LV-2:thin;LV-1:full', 'LV-1:thin') + } + } + + self.driver = self._get_driver(self.configuration) + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertFalse(rc) + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_retype_with_change_mixed_provision(self, log_warning): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'LV-1:thin', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:provisioning': ('full', 'LV-1:thin') + } + } + + self.driver = self._get_driver(self.configuration) + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertFalse(rc) + self.assertEqual(1, log_warning.call_count) + + def test_retype_with_change_same_provision(self): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'LV-1:thin', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:provisioning': ('thin', 'LV-1:thin') + } + } + + self.driver = self._get_driver(self.configuration) + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertTrue(rc) + + def test_retype_with_change_global_tier(self): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'thin', + 'infortrend:tiering': '2,3', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 
'infortrend:tiering': ('0,1', '2,3') + } + } + + mock_commands = { + 'ShowLV': self._mock_show_lv(), + 'SetPartition': SUCCEED, + 'SetLV': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition_detail(), + } + self._driver_setup(mock_commands) + self.driver.tier_pools_dict = { + self.cli_data.fake_lv_id[0]: [0, 1, 2, 3], + } + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertTrue(rc) + + def test_retype_with_change_individual_tier(self): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'thin', + 'infortrend:tiering': 'LV-1:2,3', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:tiering': ('LV-1:0,1', 'LV-1:2,3') + } + } + + mock_commands = { + 'ShowLV': self._mock_show_lv(), + 'SetPartition': SUCCEED, + 'SetLV': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition_detail(), + } + self._driver_setup(mock_commands) + self.driver.tier_pools_dict = { + self.cli_data.fake_lv_id[0]: [0, 1, 2, 3], + } + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertTrue(rc) + + def test_retype_change_tier_with_multi_settings(self): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'thin', + 'infortrend:tiering': 'LV-2:0;LV-1:2,3', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:tiering': ('LV-1:0,1', 'LV-2:0;LV-1:2,3') + } + } + + mock_commands = { + 'ShowLV': self._mock_show_lv(), + 'SetPartition': SUCCEED, + 'SetLV': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition_detail(), + } + self._driver_setup(mock_commands) + self.driver.tier_pools_dict = { + self.cli_data.fake_lv_id[0]: [0, 1, 2, 3], + } + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertTrue(rc) + + def test_retype_change_with_tier_not_exist(self): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'thin', + 'infortrend:tiering': 'LV-2:0;LV-1:2,3', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:tiering': ('LV-1:0,1', 'LV-2:0;LV-1:2,3') + } + } + + mock_commands = { + 'ShowLV': self._mock_show_lv(), + } + self._driver_setup(mock_commands) + self.driver.tier_pools_dict = { + self.cli_data.fake_lv_id[0]: [0, 1, 2], + } + + self.assertRaises( + exception.VolumeDriverException, + self.driver.retype, + None, test_volume, test_new_type, + test_diff, test_host) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_retype_change_with_not_a_tier_pool(self, log_warning): + + test_volume = self.cli_data.test_volume + test_host = self.cli_data.test_migrate_host_2 + test_new_type = { + 'name': 'type1', + 'qos_specs_id': None, + 'deleted': False, + 'extra_specs': { + 'infortrend:provisioning': 'full', + 'infortrend:tiering': 'LV-1:2', + }, + 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', + } + test_diff = { + 'extra_specs': { + 'infortrend:tiering': ('', 'LV-1:2') + } + } + + mock_commands 
= { + 'ShowLV': self._mock_show_lv(), + } + self._driver_setup(mock_commands) + self.driver.tier_pools_dict = { + self.cli_data.fake_lv_id[2]: [0, 1, 2], + } + + rc = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + self.assertTrue(rc) + self.assertEqual(1, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_retype_with_migrate(self): + + fake_pool = copy.deepcopy(self.cli_data.fake_pool) + test_host = copy.deepcopy(self.cli_data.test_migrate_host) + test_volume = self.cli_data.test_volume + test_volume_id = test_volume['id'] + test_new_type = self.cli_data.test_new_type + test_diff = self.cli_data.test_diff + test_src_part_id = self.cli_data.fake_partition_id[0] + test_dst_part_id = self.cli_data.fake_partition_id[2] + test_pair_id = self.cli_data.fake_pair_id[0] + test_model_update = { + 'provider_location': 'partition_id^%s@system_id^%s' % ( + test_dst_part_id, + int(self.cli_data.fake_system_id[0], 16) + ) + } + + mock_commands = { + 'ShowSnapshot': SUCCEED, + 'CreatePartition': SUCCEED, + 'ShowPartition': self.cli_data.get_test_show_partition( + test_volume_id, fake_pool['pool_id']), + 'CreateReplica': SUCCEED, + 'ShowReplica': + self.cli_data.get_test_show_replica_detail_for_migrate( + test_src_part_id, test_dst_part_id, test_volume_id), + 'DeleteReplica': SUCCEED, + 'DeleteMap': SUCCEED, + 'DeletePartition': SUCCEED, + } + self._driver_setup(mock_commands) + self.driver.system_id = 'DEEC' + + rc, model_update = self.driver.retype( + None, test_volume, test_new_type, test_diff, test_host) + + min_size = int(test_volume['size'] * 1024 * 0.2) + create_params = 'init=disable min=%sMB' % min_size + expect_cli_cmd = [ + mock.call('ShowSnapshot', 'part=%s' % test_src_part_id), + mock.call( + 'CreatePartition', + fake_pool['pool_id'], + test_volume['id'], + 'size=%s' % (test_volume['size'] * 1024), + create_params, + ), + mock.call('ShowPartition'), + mock.call( + 'CreateReplica', + 'Cinder-Migrate', + 'part', test_src_part_id, + 'part', test_dst_part_id, + 'type=mirror' + ), + mock.call('ShowReplica', '-l'), + mock.call('DeleteReplica', test_pair_id, '-y'), + mock.call('DeleteMap', 'part', test_src_part_id, '-y'), + mock.call('DeletePartition', test_src_part_id, '-y'), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertTrue(rc) + self.assertDictEqual(test_model_update, model_update) + + @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) + @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) + def test_update_migrated_volume(self): + src_volume = self.cli_data.test_volume + dst_volume = copy.deepcopy(self.cli_data.test_dst_volume) + test_dst_part_id = self.cli_data.fake_partition_id[1] + dst_volume['provider_location'] = 'partition_id^%s@system_id^%s' % ( + test_dst_part_id, int(self.cli_data.fake_system_id[0], 16)) + test_model_update = { + '_name_id': None, + 'provider_location': dst_volume['provider_location'], + } + + mock_commands = { + 'SetPartition': SUCCEED, + } + self._driver_setup(mock_commands) + + model_update = self.driver.update_migrated_volume( + None, src_volume, dst_volume, 'available') + + expect_cli_cmd = [ + mock.call('SetPartition', test_dst_part_id, + 'name=%s' % src_volume['id']), + ] + self._assert_cli_has_calls(expect_cli_cmd) + self.assertDictEqual(test_model_update, model_update) + + @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) + def test_update_migrated_volume_rename_fail(self): + src_volume = self.cli_data.test_volume + dst_volume = 
self.cli_data.test_dst_volume + dst_volume['_name_id'] = 'fake_name_id' + test_dst_part_id = self.cli_data.fake_partition_id[1] + dst_volume['provider_location'] = 'partition_id^%s@system_id^%s' % ( + test_dst_part_id, int(self.cli_data.fake_system_id[0], 16)) + + mock_commands = { + 'SetPartition': FAKE_ERROR_RETURN + } + self._driver_setup(mock_commands) + model_update = self.driver.update_migrated_volume( + None, src_volume, dst_volume, 'available') + self.assertEqual({'_name_id': 'fake_name_id'}, model_update) + + def test_get_extraspecs_set_with_default_setting(self): + test_extraspecs = {} + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + } + + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + def test_get_extraspecs_set_with_global_settings(self): + test_extraspecs = { + 'infortrend:tiering': '1,2', + 'infortrend:provisioning': 'thin', + } + + test_result = { + 'global_provisioning': 'thin', + 'global_tiering': [1, 2], + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + def test_get_extraspecs_set_with_tier_global_settings(self): + test_extraspecs = { + 'infortrend:tiering': '1,2', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': [1, 2], + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + def test_get_extraspecs_set_with_provision_global_settings(self): + test_extraspecs = { + 'infortrend:provisioning': 'thin', + } + + test_result = { + 'global_provisioning': 'thin', + 'global_tiering': 'all', + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + def test_get_extraspecs_set_with_individual_tier_settings(self): + test_extraspecs = { + 'infortrend:tiering': 'LV-0:0;LV-1:1,2', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + 'LV-0': { + 'tiering': [0], + }, + 'LV-1': { + 'tiering': [1, 2], + }, + } + self.driver = self._get_driver(self.configuration) + self.driver.pool_dict = {'LV-0': '', 'LV-1': '', 'LV-2': ''} + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_get_extraspecs_set_with_lv0_not_set_in_config(self, log_warning): + test_extraspecs = { + 'infortrend:tiering': 'LV-0:0;LV-1:1,2', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + 'LV-1': { + 'tiering': [1, 2], + }, + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + self.assertEqual(1, log_warning.call_count) + + def test_get_extraspecs_set_with_individual_provision_settings(self): + test_extraspecs = { + 'infortrend:provisioning': 'LV-1:FULL; LV-2:Thin', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + 'LV-1': { + 'provisioning': 'full', + }, + 'LV-2': { + 'provisioning': 'thin', + }, + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + def test_get_extraspecs_set_with_mixed_settings(self): + 
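# Mixed settings: a global tiering value combined with per-pool provisioning overrides. + 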
test_extraspecs = { + 'infortrend:provisioning': 'LV-1:FULL; LV-2:Thin', + 'infortrend:tiering': '1,2', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': [1, 2], + 'LV-1': { + 'provisioning': 'full', + }, + 'LV-2': { + 'provisioning': 'thin', + }, + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_get_extraspecs_set_with_err_tier(self, log_warning): + test_extraspecs = { + 'infortrend:provisioning': 'LV-1:FULL; LV-2:Thin', + 'infortrend:tiering': 'LV-1:4,3; LV-2:-1,0', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + 'LV-1': { + 'provisioning': 'full', + 'tiering': 'Err:[3, 4]', + }, + 'LV-2': { + 'provisioning': 'thin', + 'tiering': 'Err:[0, -1]', + }, + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + self.assertEqual(2, log_warning.call_count) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_get_extraspecs_set_with_err_provision(self, log_warning): + test_extraspecs = { + 'infortrend:provisioning': 'LV-1:FOO; LV-2:Bar', + 'infortrend:tiering': '1,2', + } + + test_result = { + 'global_provisioning': 'full', + 'global_tiering': [1, 2], + 'LV-1': { + 'provisioning': 'Err:FOO', + }, + 'LV-2': { + 'provisioning': 'Err:Bar', + }, + } + self.driver = self._get_driver(self.configuration) + result = self.driver._get_extraspecs_set(test_extraspecs) + + self.assertEqual(test_result, result) + self.assertEqual(2, log_warning.call_count) + + def test_get_pool_extraspecs_global(self): + test_extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + 'LV-2': { + 'provisioning': 'thin', + }, + } + + test_result = { + 'provisioning': 'full', + 'tiering': 'all', + } + + self.driver = self._get_driver(self.configuration) + result = self.driver._get_pool_extraspecs( + 'LV-1', test_extraspecs_set) + + self.assertEqual(test_result, result) + + def test_get_pool_extraspecs_individual(self): + test_extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': [1, 2], + 'LV-1': { + 'provisioning': 'full', + 'tiering': [0], + }, + 'LV-2': { + 'provisioning': 'thin', + }, + } + + test_result = { + 'provisioning': 'full', + 'tiering': [0], + } + + mock_commands = { + 'ShowLV': self._mock_show_lv(), + } + self._driver_setup(mock_commands) + + result = self.driver._get_pool_extraspecs( + 'LV-1', test_extraspecs_set) + + self.assertEqual(test_result, result) + + def test_get_pool_extraspecs_mixed(self): + test_extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': [1, 2], + 'LV-1': { + 'provisioning': 'full', + }, + 'LV-2': { + 'provisioning': 'thin', + }, + } + + test_result = { + 'provisioning': 'thin', + 'tiering': [1, 2], + } + mock_commands = { + 'ShowLV': self._mock_show_lv(), + } + self._driver_setup(mock_commands) + + result = self.driver._get_pool_extraspecs( + 'LV-2', test_extraspecs_set) + + self.assertEqual(test_result, result) + + def test_get_pool_extraspecs_conflict(self): + test_extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': [1, 2], + 'LV-1': { + 'provisioning': 'full', + }, + 'LV-2': { + 'provisioning': 'thin', + }, + } + + mock_commands = { + 'ShowLV': self._mock_show_lv(), + } + self._driver_setup(mock_commands) + + self.assertRaises( + exception.VolumeDriverException, + 
self.driver._get_pool_extraspecs, + 'LV-1', test_extraspecs_set) + + def test_get_manageable_volumes(self): + fake_cinder_volumes = self.cli_data.fake_cinder_volumes + + mock_commands = { + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + volume_id='hello-there', + pool_id=self.cli_data.fake_lv_id[2]) + } + + ans = [{ + 'reference': { + 'source-name': self.cli_data.fake_volume_id[0], + 'source-id': self.cli_data.fake_partition_id[0], + 'pool-name': 'LV-1' + }, + 'size': 20, + 'safe_to_manage': False, + 'reason_not_safe': 'Volume In-use', + 'cinder_id': None, + 'extra_info': None + }, { + 'reference': { + 'source-name': self.cli_data.fake_volume_id[1], + 'source-id': self.cli_data.fake_partition_id[1], + 'pool-name': 'LV-1' + }, + 'size': 20, + 'safe_to_manage': False, + 'reason_not_safe': 'Already Managed', + 'cinder_id': self.cli_data.fake_volume_id[1], + 'extra_info': None + }, { + 'reference': { + 'source-name': 'hello-there', + 'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666', + 'pool-name': 'LV-1' + }, + 'size': 20, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None + }] + + self._driver_setup(mock_commands) + result = self.driver.get_manageable_volumes(fake_cinder_volumes, + None, 1000, 0, + ['reference'], ['desc']) + ans = cv_utils.paginate_entries_list(ans, None, 1000, 0, + ['reference'], ['desc']) + self.assertEqual(ans, result) + + def test_get_manageable_snapshots(self): + fake_cinder_snapshots = self.cli_data.fake_cinder_snapshots + + mock_commands = { + 'ShowSnapshot': + self.cli_data.get_test_show_snapshot_get_manage(), + 'ShowPartition': self.cli_data.get_test_show_partition_detail( + volume_id='hello-there', + pool_id=self.cli_data.fake_lv_id[2]) + } + + self._driver_setup(mock_commands) + + ans = [{ + 'reference': { + 'source-id': self.cli_data.fake_snapshot_id[0], + 'source-name': self.cli_data.fake_snapshot_name[0], + }, + 'size': 20, + 'safe_to_manage': False, + 'reason_not_safe': 'Volume In-use', + 'cinder_id': None, + 'extra_info': None, + 'source_reference': { + 'volume-id': self.cli_data.fake_volume_id[0] + } + }, { + 'reference': { + 'source-id': self.cli_data.fake_snapshot_id[1], + 'source-name': self.cli_data.fake_snapshot_name[1], + }, + 'size': 20, + 'safe_to_manage': False, + 'reason_not_safe': 'Already Managed', + 'cinder_id': self.cli_data.fake_snapshot_name[1], + 'extra_info': None, + 'source_reference': { + 'volume-id': self.cli_data.fake_volume_id[1] + } + }, { + 'reference': { + 'source-id': self.cli_data.fake_snapshot_id[2], + 'source-name': self.cli_data.fake_snapshot_name[2], + }, + 'size': 20, + 'safe_to_manage': True, + 'reason_not_safe': None, + 'cinder_id': None, + 'extra_info': None, + 'source_reference': { + 'volume-id': 'hello-there' + } + }] + + result = self.driver.get_manageable_snapshots(fake_cinder_snapshots, + None, 1000, 0, + ['reference'], ['desc']) + ans = cv_utils.paginate_entries_list(ans, None, 1000, 0, + ['reference'], ['desc']) + self.assertEqual(ans, result) + + def test_manage_existing_snapshot(self): + fake_snapshot = self.cli_data.fake_cinder_snapshots[0] + fake_ref_from_id = { + 'source-id': self.cli_data.fake_snapshot_id[1] + } + fake_ref_from_name = { + 'source-name': self.cli_data.fake_snapshot_name[1] + } + + mock_commands = { + 'ShowSnapshot': self.cli_data.get_test_show_snapshot_named(), + 'SetSnapshot': (0, None) + } + + ans = {'provider_location': self.cli_data.fake_snapshot_id[1]} + + self._driver_setup(mock_commands) + result_from_id = 
self.driver.manage_existing_snapshot( + fake_snapshot, fake_ref_from_id) + result_from_name = self.driver.manage_existing_snapshot( + fake_snapshot, fake_ref_from_name) + + self.assertEqual(ans, result_from_id) + self.assertEqual(ans, result_from_name) + + @mock.patch.object(common_cli.LOG, 'warning') + def test_get_snapshot_ref_data_err_and_warning(self, mock_warning): + fake_snapshot = self.cli_data.fake_cinder_snapshots[0] + fake_ref_err1 = { + 'invalid-key': 'invalid-content' + } + fake_ref_err2 = { + 'source-id': 'invalid-content' + } + fake_ref_err_and_warning = { + 'source-name': '---' + } + + mock_commands = { + 'ShowSnapshot': self.cli_data.get_test_show_snapshot_named() + } + + self._driver_setup(mock_commands) + + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_snapshot, + fake_snapshot, fake_ref_err1) + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_snapshot, + fake_snapshot, fake_ref_err2) + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_snapshot, + fake_snapshot, fake_ref_err_and_warning) + self.assertEqual(1, mock_warning.call_count) + + def test_manage_existing_snapshot_get_size(self): + fake_snapshot = self.cli_data.fake_cinder_snapshots[0] + fake_ref = { + 'source-id': self.cli_data.fake_snapshot_id[1] + } + + mock_commands = { + 'ShowSnapshot': self.cli_data.get_test_show_snapshot_named(), + 'ShowPartition': self.cli_data.get_test_show_partition() + } + + self._driver_setup(mock_commands) + + result = self.driver.manage_existing_snapshot_get_size(fake_snapshot, + fake_ref) + self.assertEqual(20, result) + + def test_unmanage_snapshot(self): + fake_snapshot = self.cli_data.Fake_cinder_snapshot( + self.cli_data.fake_snapshot_name[1], + self.cli_data.fake_snapshot_id[1] + ) + + mock_commands = { + 'SetSnapshot': (0, None), + } + + expect_cli_cmd = [ + mock.call( + 'SetSnapshot', self.cli_data.fake_snapshot_id[1], + 'name=cinder-unmanaged-%s' % + self.cli_data.fake_snapshot_name[1][:-17] + ) + ] + self._driver_setup(mock_commands) + self.driver.unmanage_snapshot(fake_snapshot) + self._assert_cli_has_calls(expect_cli_cmd) diff --git a/cinder/volume/drivers/infortrend/__init__.py b/cinder/volume/drivers/infortrend/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py new file mode 100644 index 00000000000..eb25d1a22d2 --- /dev/null +++ b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py @@ -0,0 +1,387 @@ +# Copyright (c) 2015 Infortrend Technology, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Fibre Channel Driver for Infortrend Eonstor based on CLI. 
+""" + + +from oslo_log import log as logging + +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class InfortrendCLIFCDriver(driver.FibreChannelDriver): + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Infortrend_Storage_CI" + VERSION = common_cli.InfortrendCommon.VERSION + + def __init__(self, *args, **kwargs): + super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs) + self.common = common_cli.InfortrendCommon( + 'FC', configuration=self.configuration) + self.VERSION = self.common.VERSION + + def do_setup(self, context): + """Any initialization the volume driver does while starting. + + note: This runs before check_for_setup_error + """ + + LOG.debug('do_setup start') + self.common.do_setup() + + def check_for_setup_error(self): + LOG.debug('check_for_setup_error start') + self.common.check_for_setup_error() + + def create_volume(self, volume): + """Creates a volume. + + Can optionally return a Dictionary of changes + to the volume object to be persisted. + """ + LOG.debug('create_volume volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + return self.common.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + LOG.debug( + 'create_volume_from_snapshot volume id=%(volume_id)s ' + 'snapshot id=%(snapshot_id)s', { + 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) + return self.common.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + LOG.debug( + 'create_cloned_volume volume id=%(volume_id)s ' + 'src_vref provider_location=%(provider_location)s', { + 'volume_id': volume['id'], + 'provider_location': src_vref['provider_location']}) + return self.common.create_cloned_volume(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + LOG.debug( + 'extend_volume volume id=%(volume_id)s new size=%(size)s', { + 'volume_id': volume['id'], 'size': new_size}) + self.common.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Deletes a volume.""" + LOG.debug('delete_volume volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + return self.common.delete_volume(volume) + + def migrate_volume(self, ctxt, volume, host): + """Migrate the volume to the specified host. + + Returns a boolean indicating whether the migration occurred, as well as + model_update. + + :param ctxt: Context + :param volume: A dictionary describing the volume to migrate + :param host: A dictionary describing the host to migrate to, where + host['host'] is its name, and host['capabilities'] is a + dictionary of its reported capabilities. 
+ """ + LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { + 'volume_id': volume['id'], 'host': host['host']}) + return self.common.migrate_volume(volume, host) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + LOG.debug( + 'create_snapshot snapshot id=%(snapshot_id)s ' + 'volume id=%(volume_id)s', { + 'snapshot_id': snapshot['id'], + 'volume_id': snapshot['volume_id']}) + return self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + LOG.debug( + 'delete_snapshot snapshot id=%(snapshot_id)s ' + 'volume id=%(volume_id)s', { + 'snapshot_id': snapshot['id'], + 'volume_id': snapshot['volume_id']}) + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a volume.""" + pass + + def create_export(self, context, volume, connector): + """Exports the volume. + + Can optionally return a Dictionary of changes + to the volume object to be persisted. + """ + LOG.debug( + 'create_export volume provider_location=%(provider_location)s', { + 'provider_location': volume['provider_location']}) + return self.common.create_export(context, volume) + + def remove_export(self, context, volume): + """Removes an export for a volume.""" + pass + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection information. + + Assign any created volume to a compute node/host so that it can be + used from that host. + + The driver returns a driver_volume_type of 'fibre_channel'. + The target_wwn can be a single entry or a list of wwns that + correspond to the list of remote wwn(s) that will export the volume. + The initiator_target_map is a map that represents the remote wwn(s) + and a list of wwns which are visible to the remote wwn(s). + Example return values: + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': '1234567890123', + 'initiator_target_map': { + '1122334455667788': ['1234567890123'] + } + } + } + + or + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': ['1234567890123', '0987654321321'], + 'initiator_target_map': { + '1122334455667788': ['1234567890123', + '0987654321321'] + } + } + } + """ + LOG.debug( + 'initialize_connection volume id=%(volume_id)s ' + 'connector initiator=%(initiator)s', { + 'volume_id': volume['id'], + 'initiator': connector['initiator']}) + return self.common.initialize_connection(volume, connector) + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + LOG.debug('terminate_connection volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + return self.common.terminate_connection(volume, connector) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + LOG.debug('get_volume_stats refresh=%(refresh)s', { + 'refresh': refresh}) + return self.common.get_volume_stats(refresh) + + def manage_existing(self, volume, existing_ref): + """Manage an existing lun in the array. + + The lun should be in a manageable pool backend, otherwise + error would return. + Rename the backend storage object so that it matches the, + volume['name'] which is how drivers traditionally map between a + cinder volume and the associated backend storage object. 
+ + :param existing_ref: Driver-specific information used to identify + a volume + """ + LOG.debug( + 'manage_existing volume: %(volume)s ' + 'existing_ref source: %(source)s', { + 'volume': volume, + 'source': existing_ref}) + return self.common.manage_existing(volume, existing_ref) + + def unmanage(self, volume): + """Removes the specified volume from Cinder management. + + Does not delete the underlying backend storage object. + + :param volume: Cinder volume to unmanage + """ + LOG.debug('unmanage volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + self.common.unmanage(volume) + + def manage_existing_get_size(self, volume, existing_ref): + """Return size of volume to be managed by manage_existing. + + When calculating the size, round up to the next GB. + """ + LOG.debug( + 'manage_existing_get_size volume: %(volume)s ' + 'existing_ref source: %(source)s', { + 'volume': volume, + 'source': existing_ref}) + return self.common.manage_existing_get_size(volume, existing_ref) + + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type. + + :param ctxt: Context + :param volume: A dictionary describing the volume to migrate + :param new_type: A dictionary describing the volume type to convert to + :param diff: A dictionary with the difference between the two types + :param host: A dictionary describing the host to migrate to, where + host['host'] is its name, and host['capabilities'] is a + dictionary of its reported capabilities. + """ + LOG.debug( + 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { + 'volume_id': volume['id'], 'type_id': new_type['id']}) + return self.common.retype(ctxt, volume, new_type, diff, host) + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + """Return model update for migrated volume. + + :param volume: The original volume that was migrated to this backend + :param new_volume: The migration volume object that was created on + this backend as part of the migration process + :param original_volume_status: The status of the original volume + :returns: model_update to update DB with any needed changes + """ + LOG.debug( + 'update migrated volume original volume id= %(volume_id)s ' + 'new volume id=%(new_volume_id)s', { + 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) + return self.common.update_migrated_volume(ctxt, volume, new_volume, + original_volume_status) + + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + """List volumes on the backend available for management by Cinder.""" + + LOG.debug( + 'get_manageable_volumes CALLED ' + 'cinder_volumes: %(volume)s, ' + 'marker: %(mkr)s, ' + 'limit: %(lmt)s, ' + 'offset: %(_offset)s, ' + 'sort_keys: %(s_key)s, ' + 'sort_dirs: %(sort_dir)s', { + 'volume': cinder_volumes, + 'mkr': marker, + 'lmt': limit, + '_offset': offset, + 's_key': sort_keys, + 'sort_dir': sort_dirs + } + ) + return self.common.get_manageable_volumes(cinder_volumes, marker, + limit, offset, sort_keys, + sort_dirs) + + def manage_existing_snapshot(self, snapshot, existing_ref): + """Brings an existing backend storage object under Cinder management. 
+ + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + """ + + LOG.debug( + 'manage_existing_snapshot CALLED ' + 'snapshot: %(si)s, ' + 'existing_ref: %(ref)s', { + 'si': snapshot, 'ref': existing_ref + } + ) + return self.common.manage_existing_snapshot(snapshot, existing_ref) + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Return size of snapshot to be managed by manage_existing. + + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + :returns: Volume snapshot size in GiB (integer) + """ + + LOG.debug( + 'manage_existing_snapshot_get_size CALLED ' + 'snapshot: %(si)s, ' + 'existing_ref: %(ref)s', { + 'si': snapshot, 'ref': existing_ref + } + ) + return self.common.manage_existing_snapshot_get_size(snapshot, + existing_ref) + + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, + sort_keys, sort_dirs): + """List snapshots on the backend available for management by Cinder.""" + + LOG.debug( + 'get_manageable_snapshots CALLED ' + 'cinder_snapshots: %(volume)s, ' + 'marker: %(mkr)s, ' + 'limit: %(lmt)s, ' + 'offset: %(_offset)s, ' + 'sort_keys: %(s_key)s, ' + 'sort_dirs: %(sort_dir)s', { + 'volume': cinder_snapshots, + 'mkr': marker, + 'lmt': limit, + '_offset': offset, + 's_key': sort_keys, + 'sort_dir': sort_dirs + } + ) + return self.common.get_manageable_snapshots(cinder_snapshots, marker, + limit, offset, sort_keys, + sort_dirs) + + def unmanage_snapshot(self, snapshot): + """Removes the specified snapshot from Cinder management. + + Does not delete the underlying backend storage object. + + For most drivers, this will not need to do anything. However, some + drivers might use this call as an opportunity to clean up any + Cinder-specific configuration that they have associated with the + backend storage object. + + :param snapshot: Cinder volume snapshot to unmanage + """ + LOG.debug( + 'unmanage_snapshot CALLED ' + 'snapshot: %(si)s', { + 'si': snapshot + } + ) + return self.common.unmanage_snapshot(snapshot) diff --git a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py new file mode 100644 index 00000000000..30b0b2fc9a2 --- /dev/null +++ b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py @@ -0,0 +1,363 @@ +# Copyright (c) 2015 Infortrend Technology, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iSCSI Driver for Infortrend Eonstor based on CLI. 
+""" + +from oslo_log import log as logging + +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class InfortrendCLIISCSIDriver(driver.ISCSIDriver): + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Infortrend_Storage_CI" + VERSION = common_cli.InfortrendCommon.VERSION + + def __init__(self, *args, **kwargs): + super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs) + self.common = common_cli.InfortrendCommon( + 'iSCSI', configuration=self.configuration) + self.VERSION = self.common.VERSION + + def do_setup(self, context): + """Any initialization the volume driver does while starting. + + note: This runs before check_for_setup_error + """ + + LOG.debug('do_setup start') + self.common.do_setup() + + def check_for_setup_error(self): + LOG.debug('check_for_setup_error start') + self.common.check_for_setup_error() + + def create_volume(self, volume): + """Creates a volume. + + Can optionally return a Dictionary of changes + to the volume object to be persisted. + """ + LOG.debug('create_volume volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + return self.common.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + LOG.debug( + 'create_volume_from_snapshot volume id=%(volume_id)s ' + 'snapshot id=%(snapshot_id)s', { + 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) + return self.common.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + LOG.debug( + 'create_cloned_volume volume id=%(volume_id)s ' + 'src_vref provider_location=%(provider_location)s', { + 'volume_id': volume['id'], + 'provider_location': src_vref['provider_location']}) + return self.common.create_cloned_volume(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + LOG.debug( + 'extend_volume volume id=%(volume_id)s new size=%(size)s', { + 'volume_id': volume['id'], 'size': new_size}) + self.common.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Deletes a volume.""" + LOG.debug('delete_volume volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + return self.common.delete_volume(volume) + + def migrate_volume(self, ctxt, volume, host): + """Migrate the volume to the specified host. + + Returns a boolean indicating whether the migration occurred, as well as + model_update. + + :param ctxt: Context + :param volume: A dictionary describing the volume to migrate + :param host: A dictionary describing the host to migrate to, where + host['host'] is its name, and host['capabilities'] is a + dictionary of its reported capabilities. 
+ """ + LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { + 'volume_id': volume['id'], 'host': host['host']}) + return self.common.migrate_volume(volume, host) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + LOG.debug( + 'create_snapshot snapshot id=%(snapshot_id)s ' + 'volume_id=%(volume_id)s', { + 'snapshot_id': snapshot['id'], + 'volume_id': snapshot['volume_id']}) + return self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + LOG.debug( + 'delete_snapshot snapshot id=%(snapshot_id)s ' + 'volume_id=%(volume_id)s', { + 'snapshot_id': snapshot['id'], + 'volume_id': snapshot['volume_id']}) + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a volume.""" + pass + + def create_export(self, context, volume, connector): + """Exports the volume. + + Can optionally return a Dictionary of changes + to the volume object to be persisted. + """ + LOG.debug( + 'create_export volume provider_location=%(provider_location)s', { + 'provider_location': volume['provider_location']}) + return self.common.create_export(context, volume) + + def remove_export(self, context, volume): + """Removes an export for a volume.""" + pass + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection information. + + The iscsi driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. + Example return value:: + + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': 1, + } + } + """ + LOG.debug( + 'initialize_connection volume id=%(volume_id)s ' + 'connector initiator=%(initiator)s', { + 'volume_id': volume['id'], + 'initiator': connector['initiator']}) + return self.common.initialize_connection(volume, connector) + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + LOG.debug('terminate_connection volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + self.common.terminate_connection(volume, connector) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + LOG.debug('get_volume_stats refresh=%(refresh)s', { + 'refresh': refresh}) + return self.common.get_volume_stats(refresh) + + def manage_existing(self, volume, existing_ref): + """Manage an existing lun in the array. + + The lun should be in a manageable pool backend, otherwise + error would return. + Rename the backend storage object so that it matches the, + volume['name'] which is how drivers traditionally map between a + cinder volume and the associated backend storage object. + + :param existing_ref: Driver-specific information used to identify + a volume + """ + LOG.debug( + 'manage_existing volume: %(volume)s ' + 'existing_ref source: %(source)s', { + 'volume': volume, + 'source': existing_ref}) + return self.common.manage_existing(volume, existing_ref) + + def unmanage(self, volume): + """Removes the specified volume from Cinder management. + + Does not delete the underlying backend storage object. 
+ + :param volume: Cinder volume to unmanage + """ + LOG.debug('unmanage volume id=%(volume_id)s', { + 'volume_id': volume['id']}) + self.common.unmanage(volume) + + def manage_existing_get_size(self, volume, existing_ref): + """Return size of volume to be managed by manage_existing. + + When calculating the size, round up to the next GB. + """ + LOG.debug( + 'manage_existing_get_size volume: %(volume)s ' + 'existing_ref source: %(source)s', { + 'volume': volume, + 'source': existing_ref}) + return self.common.manage_existing_get_size(volume, existing_ref) + + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type. + + :param ctxt: Context + :param volume: A dictionary describing the volume to migrate + :param new_type: A dictionary describing the volume type to convert to + :param diff: A dictionary with the difference between the two types + :param host: A dictionary describing the host to migrate to, where + host['host'] is its name, and host['capabilities'] is a + dictionary of its reported capabilities. + """ + LOG.debug( + 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { + 'volume_id': volume['id'], 'type_id': new_type['id']}) + return self.common.retype(ctxt, volume, new_type, diff, host) + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + """Return model update for migrated volume. + + :param volume: The original volume that was migrated to this backend + :param new_volume: The migration volume object that was created on + this backend as part of the migration process + :param original_volume_status: The status of the original volume + :returns: model_update to update DB with any needed changes + """ + LOG.debug( + 'update migrated volume original volume id= %(volume_id)s ' + 'new volume id=%(new_volume_id)s', { + 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) + return self.common.update_migrated_volume(ctxt, volume, new_volume, + original_volume_status) + + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + """List volumes on the backend available for management by Cinder.""" + + LOG.debug( + 'get_manageable_volumes CALLED ' + 'cinder_volumes: %(volume)s, ' + 'marker: %(mkr)s, ' + 'limit: %(lmt)s, ' + 'offset: %(_offset)s, ' + 'sort_keys: %(s_key)s, ' + 'sort_dirs: %(sort_dir)s', { + 'volume': cinder_volumes, + 'mkr': marker, + 'lmt': limit, + '_offset': offset, + 's_key': sort_keys, + 'sort_dir': sort_dirs + } + ) + return self.common.get_manageable_volumes(cinder_volumes, marker, + limit, offset, sort_keys, + sort_dirs) + + def manage_existing_snapshot(self, snapshot, existing_ref): + """Brings an existing backend storage object under Cinder management. + + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + """ + + LOG.debug( + 'manage_existing_snapshot CALLED ' + 'snapshot: %(si)s, ' + 'existing_ref: %(ref)s', { + 'si': snapshot, 'ref': existing_ref + } + ) + return self.common.manage_existing_snapshot(snapshot, existing_ref) + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Return size of snapshot to be managed by manage_existing. 
+
+        :param snapshot: Cinder volume snapshot to manage
+        :param existing_ref: Driver-specific information used to identify a
+                             volume snapshot
+        :returns size: Volume snapshot size in GiB (integer)
+        """
+
+        LOG.debug(
+            'manage_existing_snapshot_get_size CALLED '
+            'snapshot: %(si)s, '
+            'existing_ref: %(ref)s', {
+                'si': snapshot, 'ref': existing_ref
+            }
+        )
+        return self.common.manage_existing_snapshot_get_size(snapshot,
+                                                              existing_ref)
+
+    def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
+                                 sort_keys, sort_dirs):
+        """List snapshots on the backend available for management by Cinder."""
+
+        LOG.debug(
+            'get_manageable_snapshots CALLED '
+            'cinder_snapshots: %(volume)s, '
+            'marker: %(mkr)s, '
+            'limit: %(lmt)s, '
+            'offset: %(_offset)s, '
+            'sort_keys: %(s_key)s, '
+            'sort_dirs: %(sort_dir)s', {
+                'volume': cinder_snapshots,
+                'mkr': marker,
+                'lmt': limit,
+                '_offset': offset,
+                's_key': sort_keys,
+                'sort_dir': sort_dirs
+            }
+        )
+        return self.common.get_manageable_snapshots(cinder_snapshots, marker,
+                                                     limit, offset, sort_keys,
+                                                     sort_dirs)
+
+    def unmanage_snapshot(self, snapshot):
+        """Removes the specified snapshot from Cinder management.
+
+        Does not delete the underlying backend storage object.
+
+        For most drivers, this will not need to do anything. However, some
+        drivers might use this call as an opportunity to clean up any
+        Cinder-specific configuration that they have associated with the
+        backend storage object.
+
+        :param snapshot: Cinder volume snapshot to unmanage
+        """
+        LOG.debug(
+            'unmanage_snapshot CALLED '
+            'snapshot: %(si)s', {
+                'si': snapshot
+            }
+        )
+        return self.common.unmanage_snapshot(snapshot)
diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/__init__.py b/cinder/volume/drivers/infortrend/raidcmd_cli/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py b/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py
new file mode 100644
index 00000000000..1097c2a080f
--- /dev/null
+++ b/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py
@@ -0,0 +1,887 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Infortrend basic CLI factory.
+""" + +import abc +import os +import time + +from oslo_concurrency import processutils +from oslo_log import log as logging +from oslo_utils import strutils +import six + +from cinder import utils + +LOG = logging.getLogger(__name__) + +DEFAULT_RETRY_TIME = 5 + + +def retry_cli(func): + def inner(self, *args, **kwargs): + total_retry_time = self.cli_retry_time + + if total_retry_time is None: + total_retry_time = DEFAULT_RETRY_TIME + + retry_time = 0 + while retry_time < total_retry_time: + rc, out = func(self, *args, **kwargs) + retry_time += 1 + + if rc == 0: + break + + LOG.error( + 'Retry %(retry)s times: %(method)s Failed ' + '%(rc)s: %(reason)s', { + 'retry': retry_time, + 'method': self.__class__.__name__, + 'rc': rc, + 'reason': out}) + + # show error log, not retrying + if rc == 1: + # RAID return fail + break + elif rc == 11: + # rc == 11 means not exist + break + elif rc == 20: + # rc == 20 means already exist + break + + LOG.debug( + 'Method: %(method)s Return Code: %(rc)s ' + 'Output: %(out)s', { + 'method': self.__class__.__name__, 'rc': rc, 'out': out}) + return rc, out + return inner + + +def os_execute(fd, raidcmd_timeout, command_line): + os.write(fd, command_line.encode('utf-8')) + return os_read(fd, 8192, 'RAIDCmd:>', raidcmd_timeout) + + +def os_read(fd, buffer_size, cmd_pattern, raidcmd_timeout): + content = '' + start_time = int(time.time()) + while True: + time.sleep(0.5) + output = os.read(fd, buffer_size) + if len(output) > 0: + content += output.decode('utf-8') + if content.find(cmd_pattern) >= 0: + break + if int(time.time()) - start_time > raidcmd_timeout: + content = 'Raidcmd timeout: %s' % content + LOG.error( + 'Raidcmd exceeds cli timeout [%(timeout)s]s.', { + 'timeout': raidcmd_timeout}) + break + return content + + +def strip_empty_in_list(list): + result = [] + for entry in list: + entry = entry.strip() + if entry != "": + result.append(entry) + + return result + + +def table_to_dict(table): + tableHeader = table[0].split(" ") + tableHeaderList = strip_empty_in_list(tableHeader) + + result = [] + + for i in range(len(table) - 2): + if table[i + 2].strip() == "": + break + + resultEntry = {} + tableEntry = table[i + 2].split(" ") + tableEntryList = strip_empty_in_list(tableEntry) + + for key, value in zip(tableHeaderList, tableEntryList): + resultEntry[key] = value + + result.append(resultEntry) + return result + + +def content_lines_to_dict(content_lines): + result = [] + resultEntry = {} + + for content_line in content_lines: + + if content_line.strip() == "": + result.append(resultEntry) + resultEntry = {} + continue + + split_entry = content_line.strip().split(": ", 1) + resultEntry[split_entry[0]] = split_entry[1] + + return result + + +@six.add_metaclass(abc.ABCMeta) +class BaseCommand(object): + + """The BaseCommand abstract class.""" + + def __init__(self): + super(BaseCommand, self).__init__() + + @abc.abstractmethod + def execute(self, *args, **kwargs): + pass + + +class ShellCommand(BaseCommand): + + """The Common ShellCommand.""" + + def __init__(self, cli_conf): + super(ShellCommand, self).__init__() + self.cli_retry_time = cli_conf.get('cli_retry_time') + + @retry_cli + def execute(self, *args, **kwargs): + commands = ' '.join(args) + result = None + rc = 0 + try: + result, err = utils.execute(commands, shell=True) + except processutils.ProcessExecutionError as pe: + rc = pe.exit_code + result = pe.stdout + result = result.replace('\n', '\\n') + LOG.error( + 'Error on execute command. 
' + 'Error code: %(exit_code)d Error msg: %(result)s', { + 'exit_code': pe.exit_code, 'result': result}) + return rc, result + + +class ExecuteCommand(BaseCommand): + + """The Cinder Filter Command.""" + + def __init__(self, cli_conf): + super(ExecuteCommand, self).__init__() + self.cli_retry_time = cli_conf.get('cli_retry_time') + + @retry_cli + def execute(self, *args, **kwargs): + result = None + rc = 0 + try: + result, err = utils.execute(*args, **kwargs) + except processutils.ProcessExecutionError as pe: + rc = pe.exit_code + result = pe.stdout + result = result.replace('\n', '\\n') + LOG.error( + 'Error on execute command. ' + 'Error code: %(exit_code)d Error msg: %(result)s', { + 'exit_code': pe.exit_code, 'result': result}) + return rc, result + + +class CLIBaseCommand(BaseCommand): + + """The CLIBaseCommand class.""" + + def __init__(self, cli_conf): + super(CLIBaseCommand, self).__init__() + self.cli_retry_time = cli_conf.get('cli_retry_time') + self.raidcmd_timeout = cli_conf.get('raidcmd_timeout') + self.cli_cache = cli_conf.get('cli_cache') + self.pid = cli_conf.get('pid') + self.fd = cli_conf.get('fd') + self.command = "" + self.parameters = () + self.show_noinit = "" + self.command_line = "" + + def _generate_command(self, parameters): + """Generate execute Command. use java, execute, command, parameters.""" + self.parameters = parameters + parameters_line = ' '.join(parameters) + + self.command_line = "{0} {1} {2}\n".format( + self.command, + parameters_line, + self.show_noinit) + + return self.command_line + + def _parser(self, content=None): + """The parser to parse command result. + + :param content: The parse Content + :returns: parse result + """ + content = content.replace("\r", "") + content = content.replace("\\/-", "") + content = content.strip() + LOG.debug(content) + + if content is not None: + content_lines = content.split("\n") + rc, out = self._parse_return(content_lines) + + if rc != 0: + return rc, out + else: + return rc, content_lines + + return -1, None + + @retry_cli + def execute(self, *args, **kwargs): + command_line = self._generate_command(args) + LOG.debug('Executing: %(command)s', { + 'command': strutils.mask_password(command_line)}) + rc = 0 + result = None + try: + content = self._execute(command_line) + rc, result = self._parser(content) + except processutils.ProcessExecutionError as pe: + rc = -2 # prevent confusing with cli real rc + result = pe.stdout + result = result.replace('\n', '\\n') + LOG.error( + 'Error on execute %(command)s. 
' + 'Error code: %(exit_code)d Error msg: %(result)s', { + 'command': strutils.mask_password(command_line), + 'exit_code': pe.exit_code, + 'result': result}) + return rc, result + + def _execute(self, command_line): + return os_execute( + self.fd, self.raidcmd_timeout, command_line) + + def _parse_return(self, content_lines): + """Get the end of command line result.""" + rc = 0 + if 'Raidcmd timeout' in content_lines[0]: + rc = -3 + return_cli_result = content_lines + elif len(content_lines) < 4: + rc = -4 + return_cli_result = 'Raidcmd output error: %s' % content_lines + else: + return_value = content_lines[-3].strip().split(' ', 1)[1] + return_cli_result = content_lines[-4].strip().split(' ', 1)[1] + rc = int(return_value, 16) + + return rc, return_cli_result + + +class ConnectRaid(CLIBaseCommand): + + """The Connect Raid Command.""" + + def __init__(self, *args, **kwargs): + super(ConnectRaid, self).__init__(*args, **kwargs) + self.command = "connect" + + +class CheckConnection(CLIBaseCommand): + + """The Check Connection Command.""" + + def __init__(self, *args, **kwargs): + super(CheckConnection, self).__init__(*args, **kwargs) + self.command = "lock" + + +class InitCache(CLIBaseCommand): + """Refresh cacahe data for update volume status.""" + + def __init__(self, *args, **kwargs): + super(InitCache, self).__init__(*args, **kwargs) + self.command = "utility init-cache" + + +class CreateLD(CLIBaseCommand): + + """The Create LD Command.""" + + def __init__(self, *args, **kwargs): + super(CreateLD, self).__init__(*args, **kwargs) + self.command = "create ld" + + +class CreateLV(CLIBaseCommand): + + """The Create LV Command.""" + + def __init__(self, *args, **kwargs): + super(CreateLV, self).__init__(*args, **kwargs) + self.command = "create lv" + + +class CreatePartition(CLIBaseCommand): + + """Create Partition. + + create part + [LV-ID] [name] [size={partition-size}] + [min={minimal-reserve-size}] [init={switch}] + [tier={tier-level-list}] + """ + + def __init__(self, *args, **kwargs): + super(CreatePartition, self).__init__(*args, **kwargs) + self.command = "create part" + + +class DeletePartition(CLIBaseCommand): + + """Delete Partition. + + delete part [partition-ID] [-y] + """ + + def __init__(self, *args, **kwargs): + super(DeletePartition, self).__init__(*args, **kwargs) + self.command = "delete part" + + +class SetPartition(CLIBaseCommand): + + """Set Partition. + + set part + [partition-ID] [name={partition-name}] [min={minimal-reserve-size}] + set part expand [partition-ID] [size={expand-size}] + set part purge [partition-ID] [number] [rule-type] + set part reclaim [partition-ID] + set part tier-resided [partition-ID] tier={tier-level-list} + """ + + def __init__(self, *args, **kwargs): + super(SetPartition, self).__init__(*args, **kwargs) + self.command = "set part" + + +class SetLV(CLIBaseCommand): + + """Set Logical Volume. + + set lv tier-migrate [LV-ID] [part={partition-IDs}] + """ + + def __init__(self, *args, **kwargs): + super(SetLV, self).__init__(*args, **kwargs) + self.command = "set lv" + + +class SetSnapshot(CLIBaseCommand): + + """Set Logical Volume. + + set lv tier-migrate [LV-ID] [part={partition-IDs}] + """ + + def __init__(self, *args, **kwargs): + super(SetSnapshot, self).__init__(*args, **kwargs) + self.command = "set si" + + +class CreateMap(CLIBaseCommand): + + """Map the Partition on the channel. 
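_parse_return() above takes the result from the tail of the raidcmd output: the third-to-last line carries a hexadecimal return value and the fourth-to-last carries the message text. A standalone sketch of that tail parsing against a fabricated output sample (the real raidcmd banner and prompt may differ):

    def parse_return(content_lines):
        """Mimic CLIBaseCommand._parse_return on a plausible output tail."""
        return_value = content_lines[-3].strip().split(' ', 1)[1]
        return_cli_result = content_lines[-4].strip().split(' ', 1)[1]
        return int(return_value, 16), return_cli_result

    # Hypothetical raidcmd output; only the last four lines matter here.
    sample = [
        'show part',
        'ID   Name   Size',
        'CLI: Successful',     # fourth-to-last: message
        'Return: 0x0000',      # third-to-last: hex return code
        '',
        'RAIDCmd:>',
    ]
    print(parse_return(sample))  # (0, 'Successful')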
+ + create map + [part] [partition-ID] [Channel-ID] + [Target-ID] [LUN-ID] [assign={assign-to}] + """ + + def __init__(self, *args, **kwargs): + super(CreateMap, self).__init__(*args, **kwargs) + self.command = "create map" + + +class DeleteMap(CLIBaseCommand): + + """Unmap the Partition on the channel. + + delete map + [part] [partition-ID] [Channel-ID] + [Target-ID] [LUN-ID] [-y] + """ + + def __init__(self, *args, **kwargs): + super(DeleteMap, self).__init__(*args, **kwargs) + self.command = "delete map" + + +class CreateSnapshot(CLIBaseCommand): + + """Create partition's Snapshot. + + create si [part] [partition-ID] + """ + + def __init__(self, *args, **kwargs): + super(CreateSnapshot, self).__init__(*args, **kwargs) + self.command = "create si" + + +class DeleteSnapshot(CLIBaseCommand): + + """Delete partition's Snapshot. + + delete si [snapshot-image-ID] [-y] + """ + + def __init__(self, *args, **kwargs): + super(DeleteSnapshot, self).__init__(*args, **kwargs) + self.command = "delete si" + + +class CreateReplica(CLIBaseCommand): + + """Create partition or snapshot's replica. + + create replica + [name] [part | si] [source-volume-ID] + [part] [target-volume-ID] [type={replication-mode}] + [priority={level}] [desc={description}] + [incremental={switch}] [timeout={value}] + [compression={switch}] + """ + + def __init__(self, *args, **kwargs): + super(CreateReplica, self).__init__(*args, **kwargs) + self.command = "create replica" + + +class DeleteReplica(CLIBaseCommand): + + """Delete and terminate specific replication job. + + delete replica [volume-pair-ID] [-y] + """ + + def __init__(self, *args, **kwargs): + super(DeleteReplica, self).__init__(*args, **kwargs) + self.command = "delete replica" + + +class CreateIQN(CLIBaseCommand): + + """Create host iqn for CHAP or lun filter. + + create iqn + [IQN] [IQN-alias-name] [user={username}] [password={secret}] + [target={name}] [target-password={secret}] [ip={ip-address}] + [mask={netmask-ip}] + """ + + def __init__(self, *args, **kwargs): + super(CreateIQN, self).__init__(*args, **kwargs) + self.command = "create iqn" + + +class DeleteIQN(CLIBaseCommand): + + """Delete host iqn by name. + + delete iqn [name] + """ + + def __init__(self, *args, **kwargs): + super(DeleteIQN, self).__init__(*args, **kwargs) + self.command = "delete iqn" + + +class SetIOTimeout(CLIBaseCommand): + + """Set CLI IO timeout. + + utility set io-timeout [time] + """ + + def __init__(self, *args, **kwargs): + super(SetIOTimeout, self).__init__(*args, **kwargs) + self.command = "utility set io-timeout" + + +class ShowCommand(CLIBaseCommand): + + """Basic Show Command.""" + + def __init__(self, *args, **kwargs): + super(ShowCommand, self).__init__(*args, **kwargs) + self.param_detail = "-l" + self.default_type = "table" + self.start_key = "" + if self.cli_cache: + self.show_noinit = "-noinit" + + def _parser(self, content=None): + """Parse Table or Detail format into dict. + + # Table format + + ID Name LD-amount + ---------------------- + 123 LV-1 1 + + # Result + + { + 'ID': '123', + 'Name': 'LV-1', + 'LD-amount': '1' + } + + # Detail format + + ID: 5DE94FF775D81C30 + Name: LV-1 + LD-amount: 1 + + # Result + + { + 'ID': '123', + 'Name': 'LV-1', + 'LD-amount': '1' + } + + :param content: The parse Content. + :returns: parse result + """ + rc, out = super(ShowCommand, self)._parser(content) + + # Error. + if rc != 0: + return rc, out + + # No content. + if len(out) < 6: + return rc, [] + + detect_type = self.detect_type() + + # Show detail content. 
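The table format handled here is turned into dictionaries by table_to_dict(), defined earlier in this file: row fields are matched to header columns after splitting on runs of spaces. A simplified, self-contained restatement run against a fabricated two-row table:

    def strip_empty(parts):
        return [p.strip() for p in parts if p.strip()]

    def table_to_dict(table):
        """Simplified restatement of cli_factory.table_to_dict, for illustration."""
        header = strip_empty(table[0].split(' '))
        result = []
        for line in table[2:]:          # table[1] is the dashed separator
            if not line.strip():
                break
            result.append(dict(zip(header, strip_empty(line.split(' ')))))
        return result

    sample = [
        ' ID    Name   LD-amount',
        ' -----------------------',
        ' 123   LV-1   1',
        ' 456   LV-2   2',
        '',
    ]
    print(table_to_dict(sample))
    # [{'ID': '123', 'Name': 'LV-1', 'LD-amount': '1'},
    #  {'ID': '456', 'Name': 'LV-2', 'LD-amount': '2'}]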
+ if detect_type == "list": + + start_id = self.detect_detail_start_index(out) + + if start_id < 0: + return rc, [] + + result = content_lines_to_dict(out[start_id:-3]) + else: + + start_id = self.detect_table_start_index(out) + + if start_id < 0: + return rc, [] + + result = table_to_dict(out[start_id:-4]) + + return rc, result + + def detect_type(self): + if self.param_detail in self.parameters: + detect_type = "list" + else: + detect_type = self.default_type + return detect_type + + def detect_table_start_index(self, content): + for i in range(1, len(content)): + key = content[i].strip().split(' ') + if self.start_key in key[0].strip(): + return i + + return -1 + + def detect_detail_start_index(self, content): + for i in range(1, len(content)): + split_entry = content[i].strip().split(' ') + if len(split_entry) >= 2 and ':' in split_entry[0]: + return i + + return -1 + + +class ShowLD(ShowCommand): + + """Show LD. + + show ld [index-list] + """ + + def __init__(self, *args, **kwargs): + super(ShowLD, self).__init__(*args, **kwargs) + self.command = "show ld" + + +class ShowLV(ShowCommand): + + """Show LV. + + show lv [lv={LV-IDs}] [-l] + """ + + def __init__(self, *args, **kwargs): + super(ShowLV, self).__init__(*args, **kwargs) + self.command = "show lv" + self.start_key = "ID" + self.show_noinit = "" + + def detect_table_start_index(self, content): + if "tier" in self.parameters: + self.start_key = "LV-Name" + + for i in range(1, len(content)): + key = content[i].strip().split(' ') + if self.start_key in key[0].strip(): + return i + + return -1 + + +class ShowPartition(ShowCommand): + + """Show Partition. + + show part [part={partition-IDs} | lv={LV-IDs}] [-l] + """ + + def __init__(self, *args, **kwargs): + super(ShowPartition, self).__init__(*args, **kwargs) + self.command = "show part" + self.start_key = "ID" + self.show_noinit = "" + + +class ShowSnapshot(ShowCommand): + + """Show Snapshot. + + show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l] + """ + + def __init__(self, *args, **kwargs): + super(ShowSnapshot, self).__init__(*args, **kwargs) + self.command = "show si" + self.start_key = "Index" + + +class ShowDevice(ShowCommand): + + """Show Device. + + show device + """ + + def __init__(self, *args, **kwargs): + super(ShowDevice, self).__init__(*args, **kwargs) + self.command = "show device" + self.start_key = "Index" + + +class ShowChannel(ShowCommand): + + """Show Channel. + + show channel + """ + + def __init__(self, *args, **kwargs): + super(ShowChannel, self).__init__(*args, **kwargs) + self.command = "show channel" + self.start_key = "Ch" + + +class ShowDisk(ShowCommand): + + """The Show Disk Command. + + show disk [disk-index-list | channel={ch}] + """ + + def __init__(self, *args, **kwargs): + super(ShowDisk, self).__init__(*args, **kwargs) + self.command = "show disk" + + +class ShowMap(ShowCommand): + + """Show Map. + + show map [part={partition-IDs} | channel={channel-IDs}] [-l] + """ + + def __init__(self, *args, **kwargs): + super(ShowMap, self).__init__(*args, **kwargs) + self.command = "show map" + self.start_key = "Ch" + + +class ShowNet(ShowCommand): + + """Show IP network. + + show net [id={channel-IDs}] [-l] + """ + + def __init__(self, *args, **kwargs): + super(ShowNet, self).__init__(*args, **kwargs) + self.command = "show net" + self.start_key = "ID" + + +class ShowLicense(ShowCommand): + + """Show License. 
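When the detail flag (-l) is used, the output is parsed by content_lines_to_dict(), also defined earlier: each 'key: value' line feeds the current record and a blank line closes it. A simplified sketch on fabricated detail output:

    def content_lines_to_dict(content_lines):
        """Simplified restatement of the detail parser, for illustration."""
        result, entry = [], {}
        for line in content_lines:
            if not line.strip():        # blank line ends one record
                result.append(entry)
                entry = {}
                continue
            key, value = line.strip().split(': ', 1)
            entry[key] = value
        return result

    sample = [
        ' ID: 5DE94FF775D81C30',
        ' Name: LV-1',
        ' LD-amount: 1',
        '',
    ]
    print(content_lines_to_dict(sample))
    # [{'ID': '5DE94FF775D81C30', 'Name': 'LV-1', 'LD-amount': '1'}]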
+ + show license + """ + + def __init__(self, *args, **kwargs): + super(ShowLicense, self).__init__(*args, **kwargs) + self.command = "show license" + self.start_key = "License" + + def _parser(self, content=None): + """Parse License format. + + # License format + + License Amount(Partition/Subsystem) Expired + ------------------------------------------------ + EonPath --- True + + # Result + + { + 'EonPath': { + 'Amount': '---', + 'Support': True + } + } + + :param content: The parse Content. + :returns: parse result + """ + rc, out = super(ShowLicense, self)._parser(content) + + if rc != 0: + return rc, out + + if len(out) > 0: + result = {} + for entry in out: + if entry['Expired'] == '---' or entry['Expired'] == 'Expired': + support = False + else: + support = True + result[entry['License']] = { + 'Amount': + entry['Amount(Partition/Subsystem)'], + 'Support': support + } + return rc, result + + return rc, [] + + +class ShowReplica(ShowCommand): + + """Show information of all replication jobs or specific job. + + show replica [id={volume-pair-IDs}] [-l] id={volume-pair-IDs} + """ + + def __init__(self, *args, **kwargs): + super(ShowReplica, self).__init__(*args, **kwargs) + self.command = 'show replica' + self.show_noinit = "" + + +class ShowWWN(ShowCommand): + + """Show Fibre network. + + show wwn + """ + + def __init__(self, *args, **kwargs): + super(ShowWWN, self).__init__(*args, **kwargs) + self.command = "show wwn" + self.start_key = "CH" + + +class ShowIQN(ShowCommand): + + """Show iSCSI initiator IQN which is set by create iqn. + + show iqn + """ + + LIST_START_LINE = "List of initiator IQN(s):" + + def __init__(self, *args, **kwargs): + super(ShowIQN, self).__init__(*args, **kwargs) + self.command = "show iqn" + self.default_type = "list" + + def detect_detail_start_index(self, content): + for i in range(1, len(content)): + if content[i].strip() == self.LIST_START_LINE: + return i + 2 + + return -1 + + +class ShowHost(ShowCommand): + + """Show host settings. + + show host + """ + + def __init__(self, *args, **kwargs): + super(ShowHost, self).__init__(*args, **kwargs) + self.command = "show host" + self.default_type = "list" + + def detect_detail_start_index(self, content): + for i in range(1, len(content)): + if ':' in content[i]: + return i + return -1 diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py b/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py new file mode 100644 index 00000000000..202f888a4bc --- /dev/null +++ b/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py @@ -0,0 +1,2758 @@ +# Copyright (c) 2015 Infortrend Technology, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Infortrend Common CLI. 
+""" +import math +import os +import time + +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_log import log as logging +from oslo_service import loopingcall +from oslo_utils import timeutils +from oslo_utils import units + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli +from cinder.volume.drivers.san import san +from cinder.volume import utils +from cinder.volume import volume_types +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + +infortrend_opts = [ + cfg.ListOpt('infortrend_pools_name', + default='', + help='The Infortrend logical volumes name list. ' + 'It is separated with comma.'), + cfg.StrOpt('infortrend_cli_path', + default='/opt/bin/Infortrend/raidcmd_ESDS10.jar', + help='The Infortrend CLI absolute path.'), + cfg.IntOpt('infortrend_cli_max_retries', + default=5, + help='The maximum retry times if a command fails.'), + cfg.IntOpt('infortrend_cli_timeout', + default=60, + help='The timeout for CLI in seconds.'), + cfg.ListOpt('infortrend_slots_a_channels_id', + default='', + help='Infortrend raid channel ID list on Slot A ' + 'for OpenStack usage. It is separated with comma.'), + cfg.ListOpt('infortrend_slots_b_channels_id', + default='', + help='Infortrend raid channel ID list on Slot B ' + 'for OpenStack usage. It is separated with comma.'), + cfg.StrOpt('infortrend_iqn_prefix', + default='iqn.2002-10.com.infortrend', + help='Infortrend iqn prefix for iSCSI.'), + cfg.BoolOpt('infortrend_cli_cache', + default=False, + help='The Infortrend CLI cache. ' + 'While set True, the RAID status report will use cache ' + 'stored in the CLI. Never enable this unless the RAID is ' + 'managed only by Openstack and only by one infortrend ' + 'cinder-volume backend. Otherwise, CLI might report ' + 'out-dated status to cinder and thus there might be some ' + 'race condition among all backend/CLIs.'), + cfg.StrOpt('java_path', + default='/usr/bin/java', + help='The Java absolute path.'), +] + +CONF = cfg.CONF +CONF.register_opts(infortrend_opts) + +CLI_RC_FILTER = { + 'CreatePartition': {'error': _('Failed to create partition.')}, + 'DeletePartition': {'error': _('Failed to delete partition.')}, + 'SetPartition': {'error': _('Failed to set partition.')}, + 'CreateMap': { + 'warning': { + 1: 'RAID return Fail. Might be LUN conflict.', + 20: 'The MCS Channel is grouped. 
/ LUN Already Used.'}, + 'error': _('Failed to create map.'), + }, + 'DeleteMap': { + 'warning': {11: 'No mapping.'}, + 'error': _('Failed to delete map.'), + }, + 'CreateSnapshot': {'error': _('Failed to create snapshot.')}, + 'DeleteSnapshot': { + 'warning': {11: 'No such snapshot exist.'}, + 'error': _('Failed to delete snapshot.') + }, + 'CreateReplica': {'error': _('Failed to create replica.')}, + 'DeleteReplica': {'error': _('Failed to delete replica.')}, + 'CreateIQN': { + 'warning': {20: 'IQN already existed.'}, + 'error': _('Failed to create iqn.'), + }, + 'DeleteIQN': { + 'warning': { + 20: 'IQN has been used to create map.', + 11: 'No such host alias name.', + }, + 'error': _('Failed to delete iqn.'), + }, + 'ShowLV': {'error': _('Failed to get lv info.')}, + 'ShowPartition': {'error': _('Failed to get partition info.')}, + 'ShowSnapshot': {'error': _('Failed to get snapshot info.')}, + 'ShowDevice': {'error': _('Failed to get device info.')}, + 'ShowChannel': {'error': _('Failed to get channel info.')}, + 'ShowMap': {'error': _('Failed to get map info.')}, + 'ShowNet': {'error': _('Failed to get network info.')}, + 'ShowLicense': {'error': _('Failed to get license info.')}, + 'ShowReplica': {'error': _('Failed to get replica info.')}, + 'ShowWWN': {'error': _('Failed to get wwn info.')}, + 'ShowIQN': {'error': _('Failed to get iqn info.')}, + 'ShowHost': {'error': _('Failed to get host info.')}, + 'SetIOTimeout': {'error': _('Failed to set IO timeout.')}, + 'ConnectRaid': {'error': _('Failed to connect to raid.')}, + 'InitCache': { + 'warning': {9: 'Device not connected.'}, + 'error': _('Failed to init cache.')}, + 'ExecuteCommand': {'error': _('Failed to execute common command.')}, + 'ShellCommand': {'error': _('Failed to execute shell command.')}, +} + + +def log_func(func): + def inner(self, *args, **kwargs): + LOG.debug('Entering: %(method)s', {'method': func.__name__}) + start = timeutils.utcnow() + ret = func(self, *args, **kwargs) + end = timeutils.utcnow() + LOG.debug( + 'Leaving: %(method)s, ' + 'Spent: %(time)s sec, ' + 'Return: %(ret)s.', { + 'method': func.__name__, + 'time': timeutils.delta_seconds(start, end), + 'ret': ret}) + return ret + return inner + + +def mi_to_gi(mi_size): + return mi_size * units.Mi / units.Gi + + +def gi_to_mi(gi_size): + return gi_size * units.Gi / units.Mi + + +def ti_to_gi(ti_size): + return ti_size * units.Ti / units.Gi + + +def ti_to_mi(ti_size): + return ti_size * units.Ti / units.Mi + + +class InfortrendCliException(exception.CinderException): + message = _("Infortrend CLI exception: %(err)s Param: %(param)s " + "(Return Code: %(rc)s) (Output: %(out)s)") + + +class InfortrendCommon(object): + + """The Infortrend's Common Command using CLI. + + Version history: + + .. 
code-block:: none + + 1.0.0 - Initial driver + 1.0.1 - Support DS4000 + 1.0.2 - Support GS/GSe Family + 1.0.3 - Support MPIO for iSCSI protocol + 1.0.4 - Fix Nova live migration (bug #1481968) + 1.1.0 - Improve driver performance + 1.1.1 - Fix creating volume on a wrong pool + Fix manage-existing volume issue + 1.1.2 - Add volume migration check + 2.0.0 - Enhance extraspecs usage and refactor retype + 2.0.1 - Improve speed for deleting volume + 2.0.2 - Remove timeout for replication + 2.0.3 - Use full ID for volume name + 2.1.0 - Support for list manageable volume + Support for list/manage/unmanage snapshot + Remove unnecessary check in snapshot + 2.1.1 - Add Lun ID overflow check + 2.1.2 - Support for force detach volume + 2.1.3 - Add handling for LUN ID conflict for Active/Active cinder + Improve speed for attach/detach/polling commands + 2.1.4 - Check CLI connection first for polling process + """ + + VERSION = '2.1.4' + + constants = { + 'ISCSI_PORT': 3260, + 'MAX_LUN_MAP_PER_CHL': 128, + } + + PROVISIONING_KEY = 'infortrend:provisioning' + TIERING_SET_KEY = 'infortrend:tiering' + + PROVISIONING_VALUES = ['thin', 'full'] + TIERING_VALUES = [0, 1, 2, 3] + + def __init__(self, protocol, configuration=None): + + self.protocol = protocol + self.configuration = configuration + self.configuration.append_config_values(san.san_opts) + self.configuration.append_config_values(infortrend_opts) + + self.path = self.configuration.infortrend_cli_path + self.password = self.configuration.san_password + self.ip = self.configuration.san_ip + self.cli_retry_time = self.configuration.infortrend_cli_max_retries + self.cli_timeout = self.configuration.infortrend_cli_timeout + self.cli_cache = self.configuration.infortrend_cli_cache + self.iqn_prefix = self.configuration.infortrend_iqn_prefix + self.iqn = self.iqn_prefix + ':raid.uid%s.%s%s%s' + self.unmanaged_prefix = 'cinder-unmanaged-%s' + self.java_path = self.configuration.java_path + + self.fc_lookup_service = fczm_utils.create_lookup_service() + + self.backend_name = None + self._volume_stats = None + self.system_id = None + self.pid = None + self.fd = None + self._model_type = 'R' + + self.map_dict = { + 'slot_a': {}, + 'slot_b': {}, + } + self.map_dict_init = False + self.target_dict = { + 'slot_a': {}, + 'slot_b': {}, + } + if self.protocol == 'iSCSI': + self.mcs_dict = { + 'slot_a': {}, + 'slot_b': {}, + } + self.tier_pools_dict = {} + + def check_for_setup_error(self): + # These two checks needs raidcmd to be ready + self._check_pools_setup() + self._check_host_setup() + + def do_setup(self): + if self.ip == '': + msg = _('san_ip is not set.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + if self.cli_timeout < 40: + msg = _('infortrend_cli_timeout should be larger than 40.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + self._init_pool_dict() + self._init_channel_list() + self._init_raidcmd() + self.cli_conf = { + 'path': self.path, + 'cli_retry_time': self.cli_retry_time, + 'raidcmd_timeout': self.cli_timeout, + 'cli_cache': self.cli_cache, + 'pid': self.pid, + 'fd': self.fd, + } + self._init_raid_connection() + self._set_raidcmd() + + def _init_pool_dict(self): + self.pool_dict = {} + pools_name = self.configuration.infortrend_pools_name + if pools_name == '': + msg = _('Pools name is not set.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + tmp_pool_list = pools_name + for pool in tmp_pool_list: + self.pool_dict[pool.strip()] = '' + + def 
_init_channel_list(self): + self.channel_list = { + 'slot_a': [], + 'slot_b': [], + } + tmp_channel_list = ( + self.configuration.infortrend_slots_a_channels_id + ) + self.channel_list['slot_a'] = ( + [str(channel) for channel in tmp_channel_list] + ) + tmp_channel_list = ( + self.configuration.infortrend_slots_b_channels_id + ) + self.channel_list['slot_b'] = ( + [str(channel) for channel in tmp_channel_list] + ) + + def _init_raidcmd(self): + if not self.pid: + self.pid, self.fd = os.forkpty() + if self.pid == 0: + try: + os.execv(self.java_path, + [self.java_path, '-jar', self.path]) + except OSError: + msg = _('Raidcmd failed to start. ' + 'Please check Java is installed.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + check_java_start = cli.os_read(self.fd, 1024, 'RAIDCmd:>', 10) + if 'Raidcmd timeout' in check_java_start: + msg = _('Raidcmd failed to start. ' + 'Please check Java is installed.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + LOG.debug('Raidcmd [%s:%s] start!', self.pid, self.fd) + + def _set_raidcmd(self): + cli_io_timeout = str(self.cli_timeout - 10) + rc, _ = self._execute('SetIOTimeout', cli_io_timeout) + LOG.debug('CLI IO timeout is [%s]', cli_io_timeout) + + def _init_raid_connection(self): + raid_password = '' + if self.password: + raid_password = 'password=%s' % self.password + + rc, _ = self._execute('ConnectRaid', self.ip, raid_password, '-notiOn') + LOG.info('Raid [%s] is connected!', self.ip) + + def _execute_command(self, cli_type, *args, **kwargs): + command = getattr(cli, cli_type) + return command(self.cli_conf).execute(*args, **kwargs) + + def _execute(self, cli_type, *args, **kwargs): + LOG.debug('Executing command type: %(type)s.', {'type': cli_type}) + + @lockutils.synchronized('raidcmd-%s' % self.pid, 'infortrend-', False) + def _lock_raidcmd(cli_type, *args, **kwargs): + return self._execute_command(cli_type, *args, **kwargs) + + rc, out = _lock_raidcmd(cli_type, *args, **kwargs) + + if rc != 0: + if cli_type == 'CheckConnection': + return rc, out + elif ('warning' in CLI_RC_FILTER[cli_type] and + rc in CLI_RC_FILTER[cli_type]['warning']): + LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc]) + else: + msg = CLI_RC_FILTER[cli_type]['error'] + LOG.error(msg) + raise InfortrendCliException( + err=msg, param=args, rc=rc, out=out) + return rc, out + + @log_func + def _init_map_info(self): + if not self.map_dict_init: + + rc, channel_info = self._execute('ShowChannel') + + if 'BID' in channel_info[0]: + self._model_type = 'R' + self._set_channel_id(channel_info, 'slot_b') + else: + self._model_type = 'G' + + self._set_channel_id(channel_info, 'slot_a') + + self.map_dict_init = True + + for controller in sorted(self.map_dict.keys()): + LOG.debug('Controller: [%(controller)s] ' + 'enable channels: %(ch)s', { + 'controller': controller, + 'ch': sorted(self.map_dict[controller].keys())}) + + @log_func + def _update_map_info(self, multipath=False): + """Record the driver mapping information. 
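_execute_command() above resolves a command class by name with getattr(cli, cli_type) and runs it with the shared cli_conf. A hedged sketch of that dispatch pattern using stand-in classes (none of these names are the driver's real configuration):

    class ShowLV(object):
        def __init__(self, conf):
            self.conf = conf

        def execute(self, *args):
            # A real command would write to the raidcmd pty here.
            return 0, ['fake lv table for %s' % ' '.join(args)]

    class FakeCliModule(object):
        ShowLV = ShowLV

    def execute_command(cli_module, cli_type, cli_conf, *args):
        """Same shape as InfortrendCommon._execute_command."""
        command_cls = getattr(cli_module, cli_type)
        return command_cls(cli_conf).execute(*args)

    rc, out = execute_command(FakeCliModule, 'ShowLV', {'cli_retry_time': 5}, '-l')
    print(rc, out)  # 0 ['fake lv table for -l']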
+ + map_dict = { + 'slot_a': { + '0': [1, 2, 3, 4] # Slot A Channel 0 map lun 1, 2, 3, 4 + }, + 'slot_b' : { + '1': [0, 1, 3] # Slot B Channel 1 map lun 0, 1, 3 + } + } + """ + rc, map_info = self._execute('ShowMap') + + self._update_map_info_by_slot(map_info, 'slot_a') + + if multipath and self._model_type == 'R': + self._update_map_info_by_slot(map_info, 'slot_b') + + return map_info + + @log_func + def _update_map_info_by_slot(self, map_info, slot_key): + for key, value in self.map_dict[slot_key].items(): + self.map_dict[slot_key][key] = list( + range(self.constants['MAX_LUN_MAP_PER_CHL'])) + + if len(map_info) > 0 and isinstance(map_info, list): + for entry in map_info: + ch = entry['Ch'] + lun = entry['LUN'] + if ch not in self.map_dict[slot_key].keys(): + continue + + target_id = self.target_dict[slot_key][ch] + if (entry['Target'] == target_id and + int(lun) in self.map_dict[slot_key][ch]): + self.map_dict[slot_key][ch].remove(int(lun)) + + def _check_initiator_has_lun_map(self, initiator_info): + rc, map_info = self._execute('ShowMap') + + if not isinstance(initiator_info, list): + initiator_info = (initiator_info,) + if len(map_info) > 0: + for initiator_name in initiator_info: + for entry in map_info: + if initiator_name.lower() == entry['Host-ID'].lower(): + return True + return False + + @log_func + def _set_channel_id( + self, channel_info, controller): + + if self.protocol == 'iSCSI': + check_channel_type = ('NETWORK', 'LAN') + else: + check_channel_type = ('FIBRE', 'Fibre') + + for entry in channel_info: + if entry['Type'] in check_channel_type: + if entry['Ch'] in self.channel_list[controller]: + self.map_dict[controller][entry['Ch']] = [] + + if self.protocol == 'iSCSI': + self._update_mcs_dict( + entry['Ch'], entry['MCS'], controller) + + self._update_target_dict(entry, controller) + + # check the channel status + if entry['curClock'] == '---': + LOG.warning( + 'Controller[%(controller)s] ' + 'Channel[%(Ch)s] not linked, please check.', { + 'controller': controller, 'Ch': entry['Ch']}) + + @log_func + def _update_target_dict(self, channel, controller): + """Record the target id for mapping. + + # R model + target_dict = { + 'slot_a': { + '0': '0', + '1': '0', + }, + 'slot_b': { + '0': '1', + '1': '1', + }, + } + + # G model + target_dict = { + 'slot_a': { + '2': '32', + '3': '112', + } + } + """ + if self._model_type == 'G': + self.target_dict[controller][channel['Ch']] = channel['ID'] + else: + if controller == 'slot_a': + self.target_dict[controller][channel['Ch']] = channel['AID'] + else: + self.target_dict[controller][channel['Ch']] = channel['BID'] + + def _update_mcs_dict(self, channel_id, mcs_id, controller): + """Record the iSCSI MCS topology. 
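_update_map_info_by_slot() above starts every enabled channel with the full LUN range and removes each LUN that ShowMap reports as used on that channel/target pair. A small worked sketch with a reduced LUN range and fabricated map entries:

    MAX_LUN = 8
    map_dict = {'slot_a': {'0': list(range(MAX_LUN)), '1': list(range(MAX_LUN))}}
    target_dict = {'slot_a': {'0': '0', '1': '0'}}

    # Hypothetical 'ShowMap' rows: channel, target and LUN already mapped.
    map_info = [
        {'Ch': '0', 'Target': '0', 'LUN': '0'},
        {'Ch': '0', 'Target': '0', 'LUN': '2'},
        {'Ch': '1', 'Target': '0', 'LUN': '0'},
    ]

    for entry in map_info:
        ch, lun = entry['Ch'], int(entry['LUN'])
        if ch in map_dict['slot_a'] and entry['Target'] == target_dict['slot_a'][ch]:
            if lun in map_dict['slot_a'][ch]:
                map_dict['slot_a'][ch].remove(lun)

    print(map_dict['slot_a']['0'])  # [1, 3, 4, 5, 6, 7] -> free LUNs on channel 0
    print(map_dict['slot_a']['1'])  # [1, 2, 3, 4, 5, 6, 7]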
+ + # R model with mcs, but it not working with iSCSI multipath + mcs_dict = { + 'slot_a': { + '0': ['0', '1'], + '2': ['2'], + '3': ['3'], + }, + 'slot_b': { + '0': ['0', '1'], + '2': ['2'] + } + } + + # G model with mcs + mcs_dict = { + 'slot_a': { + '0': ['0', '1'], + '1': ['2'] + }, + 'slot_b': {} + } + """ + if mcs_id not in self.mcs_dict[controller]: + self.mcs_dict[controller][mcs_id] = [] + self.mcs_dict[controller][mcs_id].append(channel_id) + + def _check_pools_setup(self): + temp_pool_dict = self.pool_dict.copy() + + rc, lv_info = self._execute('ShowLV') + + for lv in lv_info: + if lv['Name'] in temp_pool_dict.keys(): + del temp_pool_dict[lv['Name']] + self.pool_dict[lv['Name']] = lv['ID'] + if len(temp_pool_dict) == 0: + break + + if len(temp_pool_dict) != 0: + msg = _('Please create %(pool_list)s pool in advance!') % { + 'pool_list': list(temp_pool_dict.keys())} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + def _check_host_setup(self): + rc, host_info = self._execute('ShowHost') + max_lun = int(host_info[0]['Max LUN per ID']) + device_type = host_info[0]['Peripheral device type'] + + if 'No Device Present' not in device_type: + msg = _('Please set to ' + ' in advance!') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + self.constants['MAX_LUN_MAP_PER_CHL'] = max_lun + system_id = self._get_system_id(self.ip) + LOG.info('Device: [%(device)s] ' + 'max LUN setting is: [%(luns)s]', { + 'device': system_id, + 'luns': self.constants['MAX_LUN_MAP_PER_CHL']}) + + def create_volume(self, volume): + """Create a Infortrend partition.""" + + self._create_partition_by_default(volume) + part_id = self._get_part_id(volume['id']) + + system_id = self._get_system_id(self.ip) + + model_dict = { + 'system_id': system_id, + 'partition_id': part_id, + } + + model_update = { + "provider_location": self._concat_provider_location(model_dict), + } + LOG.info('Create Volume %(volume_id)s completed.', { + 'volume_id': volume['id']}) + return model_update + + def _create_partition_by_default(self, volume): + pool_id = self._get_volume_pool_id(volume) + self._create_partition_with_pool(volume, pool_id) + + def _create_partition_with_pool( + self, volume, pool_id, extraspecs=None): + + volume_size = gi_to_mi(volume['size']) + pool_name = volume['host'].split('#')[-1] + + if extraspecs: + extraspecs = self._get_extraspecs_set(extraspecs) + else: + extraspecs = self._get_volume_type_extraspecs(volume) + + pool_extraspecs = self._get_pool_extraspecs(pool_name, extraspecs) + provisioning = pool_extraspecs['provisioning'] + tiering = pool_extraspecs['tiering'] + + extraspecs_dict = {} + # Normal pool + if pool_id not in self.tier_pools_dict.keys(): + if provisioning == 'thin': + extraspecs_dict['provisioning'] = int(volume_size * 0.2) + extraspecs_dict['init'] = 'disable' + # Tier pool + else: + pool_tiers = self.tier_pools_dict[pool_id] + if tiering == 'all': + # thin provisioning reside on all tiers + if provisioning == 'thin': + extraspecs_dict['provisioning'] = 0 + tiering_set = ','.join(str(i) for i in pool_tiers) + extraspecs_dict['tiering'] = tiering_set + extraspecs_dict['init'] = 'disable' + # full provisioning reside on the top tier + else: + top_tier = self.tier_pools_dict.get(pool_id)[0] + self._check_tier_space(top_tier, pool_id, volume_size) + extraspecs_dict['tiering'] = str(top_tier) + else: + # check extraspecs fit the real pool tiers + if not self._check_pool_tiering(pool_tiers, tiering): + msg = _('Tiering extraspecs 
%(pool_name)s:%(tiering)s ' + 'can not fit in the real tiers %(pool_tier)s.') % { + 'pool_name': pool_name, + 'tiering': tiering, + 'pool_tier': pool_tiers} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + # User specific tier levels + if provisioning == 'thin': + extraspecs_dict['provisioning'] = 0 + tiering_set = ','.join(str(i) for i in tiering) + extraspecs_dict['tiering'] = tiering_set + extraspecs_dict['init'] = 'disable' + else: + self._check_tier_space(tiering[0], pool_id, volume_size) + extraspecs_dict['tiering'] = str(tiering[0]) + + cmd = '' + if extraspecs_dict: + cmd = self._create_part_parameters_str(extraspecs_dict) + + commands = (pool_id, volume['id'], 'size=%s' % int(volume_size), cmd) + self._execute('CreatePartition', *commands) + + def _check_pool_tiering(self, pool_tiers, extra_specs_tiers): + return set(extra_specs_tiers).issubset(pool_tiers) + + def _check_tier_pool_or_not(self, pool_id): + if pool_id in self.tier_pools_dict.keys(): + return True + return False + + def _check_tier_space(self, tier_level, pool_id, volume_size): + rc, lv_info = self._execute('ShowLV', 'tier') + if lv_info: + for entry in lv_info: + if (entry['LV-ID'] == pool_id and + int(entry['Tier']) == tier_level): + total_space = self._parse_size(entry['Size'], 'MB') + used_space = self._parse_size(entry['Used'], 'MB') + if not (total_space and used_space): + return + elif volume_size > (total_space - used_space): + LOG.warning('Tier pool [%(pool_id)s] ' + 'has already run out of space in ' + 'tier level [%(tier_level)s].', { + 'pool_id': pool_id, + 'tier_level': tier_level}) + + def _parse_size(self, size_string, return_unit): + size = float(size_string.split(' ', 1)[0]) + if 'TB' in size_string: + if return_unit == 'GB': + return round(ti_to_gi(size), 2) + elif return_unit == 'MB': + return round(ti_to_mi(size)) + elif 'GB' in size_string: + if return_unit == 'GB': + return round(size, 2) + elif return_unit == 'MB': + return round(gi_to_mi(size)) + elif 'MB' in size_string: + if return_unit == 'GB': + return round(mi_to_gi(size), 2) + elif return_unit == 'MB': + return round(size) + else: + LOG.warning('Tier size [%(size_string)s], ' + 'the unit is not recognized.', { + 'size_string': size_string}) + return + + def _create_part_parameters_str(self, extraspecs_dict): + parameters_list = [] + parameters = { + 'provisioning': 'min=%sMB', + 'tiering': 'tier=%s', + 'init': 'init=%s', + } + for extraspec in sorted(extraspecs_dict.keys()): + value = parameters[extraspec] % (extraspecs_dict[extraspec]) + parameters_list.append(value) + + return ' '.join(parameters_list) + + @log_func + def _iscsi_create_map(self, part_id, multipath, host, system_id): + + host_filter = self._create_host_filter(host) + rc, net_list = self._execute('ShowNet') + self._update_map_info(multipath) + rc, part_mapping = self._execute( + 'ShowMap', 'part=%s' % part_id) + map_chl, map_lun = self._get_mapping_info(multipath) + lun_id = map_lun[0] + save_id = lun_id + + while True: + rc, iqns, ips, luns = self._exec_iscsi_create_map(map_chl, + part_mapping, + host, + part_id, + lun_id, + host_filter, + system_id, + net_list) + if rc == 20: + self._delete_all_map(part_id) + lun_id = self._find_next_lun_id(lun_id, save_id) + else: + break + + return iqns, ips, luns + + def _exec_iscsi_create_map(self, channel_dict, part_mapping, host, + part_id, lun_id, host_filter, system_id, + net_list): + iqns = [] + ips = [] + luns = [] + rc = 0 + for controller in sorted(channel_dict.keys()): + for channel_id in 
sorted(channel_dict[controller]): + target_id = self.target_dict[controller][channel_id] + exist_lun_id = self._check_map( + channel_id, target_id, part_mapping, host) + + if exist_lun_id < 0: + commands = ( + 'part', part_id, channel_id, target_id, lun_id, + host_filter + ) + rc, out = self._execute('CreateMap', *commands) + if (rc == 20) or (rc == 1): + # LUN Conflict detected. + msg = _('Volume[%(part_id)s] LUN conflict detected, ' + 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { + 'part_id': part_id, 'Ch': channel_id, + 'tid': target_id, 'lun': lun_id} + LOG.warning(msg) + return 20, 0, 0, 0 + if rc != 0: + msg = _('Volume[%(part_id)s] create map failed, ' + 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { + 'part_id': part_id, 'Ch': channel_id, + 'tid': target_id, 'lun': lun_id} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + exist_lun_id = int(lun_id) + self.map_dict[controller][channel_id].remove(exist_lun_id) + + mcs_id = self._get_mcs_id(channel_id, controller) + # There might be some channels in the same group + for channel in self.mcs_dict[controller][mcs_id]: + target_id = self.target_dict[controller][channel] + map_ch_info = { + 'system_id': system_id, + 'mcs_id': mcs_id, + 'target_id': target_id, + 'controller': controller, + } + iqns.append(self._generate_iqn(map_ch_info)) + ips.append(self._get_ip_by_channel( + channel, net_list, controller)) + luns.append(exist_lun_id) + + return rc, iqns, ips, luns + + def _check_map(self, channel_id, target_id, part_map_info, host): + if len(part_map_info) > 0: + for entry in part_map_info: + if (entry['Ch'] == channel_id and + entry['Target'] == target_id and + entry['Host-ID'].lower() == host.lower()): + return int(entry['LUN']) + return -1 + + def _create_host_filter(self, host): + if self.protocol == 'iSCSI': + host_filter = 'iqn=%s' % host + else: + host_filter = 'wwn=%s' % host + return host_filter + + def _get_extraspecs_dict(self, volume_type_id): + extraspecs = {} + if volume_type_id: + extraspecs = volume_types.get_volume_type_extra_specs( + volume_type_id) + return extraspecs + + def _get_volume_pool_id(self, volume): + pool_name = volume['host'].split('#')[-1] + pool_id = self._find_pool_id_by_name(pool_name) + + if not pool_id: + msg = _('Failed to get pool id with pool %(pool_name)s.') % { + 'pool_name': pool_name} + LOG.error(msg) + raise exception.VolumeDriverException(data=msg) + + return pool_id + + def _get_volume_type_extraspecs(self, volume): + """Example for Infortrend extraspecs settings: + + Using a global setting: + infortrend:provisoioning: 'thin' + infortrend:tiering: '0,1,2' + + Using an individual setting: + infortrend:provisoioning: 'LV0:thin;LV1:full' + infortrend:tiering: 'LV0:0,1,3; LV1:1' + + Using a mixed setting: + infortrend:provisoioning: 'LV0:thin;LV1:full' + infortrend:tiering: 'all' + """ + # extraspecs default setting + extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + } + extraspecs = self._get_extraspecs_dict(volume['volume_type_id']) + if extraspecs: + extraspecs_set = self._get_extraspecs_set(extraspecs) + return extraspecs_set + + def _get_pool_extraspecs(self, pool_name, all_extraspecs): + LOG.debug('_Extraspecs_dict: %s', all_extraspecs) + pool_extraspecs = {} + provisioning = None + tiering = None + + # check individual setting + if pool_name in all_extraspecs.keys(): + if 'provisioning' in all_extraspecs[pool_name]: + provisioning = all_extraspecs[pool_name]['provisioning'] + if 'tiering' in all_extraspecs[pool_name]: + tiering = 
all_extraspecs[pool_name]['tiering'] + + # use global setting + if not provisioning: + provisioning = all_extraspecs['global_provisioning'] + if not tiering: + tiering = all_extraspecs['global_tiering'] + + if tiering != 'all': + pool_id = self._find_pool_id_by_name(pool_name) + if not self._check_tier_pool_or_not(pool_id): + LOG.warning('Infortrend pool: [%(pool_name)s] ' + 'is not a tier pool. Skip tiering ' + '%(tiering)s because it is invalid.', { + 'pool_name': pool_name, + 'tiering': tiering}) + self._check_extraspecs_conflict(tiering, provisioning) + + pool_extraspecs['provisioning'] = provisioning + pool_extraspecs['tiering'] = tiering + + for key, value in pool_extraspecs.items(): + if 'Err' in value: + err, user_setting = value.split(':', 1) + msg = _('Extraspecs Error, ' + 'pool: [%(pool)s], %(key)s: %(setting)s ' + 'is invalid, please check.') % { + 'pool': pool_name, + 'key': key, + 'setting': user_setting} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + return pool_extraspecs + + def _check_extraspecs_conflict(self, tiering, provisioning): + if len(tiering) > 1 and provisioning == 'full': + msg = _('When provision is full, ' + 'it must specify only one tier instead of ' + '%(tiering)s tiers.') % { + 'tiering': tiering} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + def _get_extraspecs_set(self, extraspecs): + """Return extraspecs settings dictionary + + Legal values: + provisioning: 'thin', 'full' + tiering: 'all' or combination of 0,1,2,3 + + Only global settings example: + extraspecs_set = { + 'global_provisioning': 'thin', + 'global_tiering': '[0, 1]', + } + + All individual settings example: + extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + 'LV0': { + 'provisioning': 'thin', + 'tiering': [0, 1, 3], + }, + 'LV1': { + 'provisioning': 'full', + 'tiering': [1], + } + } + + Mixed settings example: + extraspecs_set = { + 'global_provisioning': 'thin', + 'global_tiering': 'all', + 'LV0': { + 'tiering': [0, 1, 3], + }, + 'LV1': { + 'provisioning': 'full', + 'tiering': [1], + } + } + + Use global settings if a pool has no individual settings. 
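To make the extra-specs grammar above concrete, here is one hypothetical input and the structure the parsing helpers that follow are expected to produce, assuming pools LV-1 and LV-2 are listed in infortrend_pools_name:

    # Volume-type extra specs as an admin might set them:
    #   infortrend:provisioning  ->  'LV-1:thin;LV-2:full'
    #   infortrend:tiering       ->  'LV-1:0,1;LV-2:all'
    #
    # Expected parsed structure, following the docstring examples above:
    expected_set = {
        'global_provisioning': 'full',   # defaults kept for pools not listed
        'global_tiering': 'all',
        'LV-1': {'provisioning': 'thin', 'tiering': [0, 1]},
        'LV-2': {'provisioning': 'full', 'tiering': 'all'},
    }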
+ """ + # extraspecs default setting + extraspecs_set = { + 'global_provisioning': 'full', + 'global_tiering': 'all', + } + + provisioning_string = extraspecs.get(self.PROVISIONING_KEY, None) + tiering_string = extraspecs.get(self.TIERING_SET_KEY, None) + + extraspecs_set = self._get_provisioning_setting( + extraspecs_set, provisioning_string) + + extraspecs_set = self._get_tiering_setting( + extraspecs_set, tiering_string) + + return extraspecs_set + + def _get_provisioning_setting(self, extraspecs_set, provisioning_string): + # provisioning individual setting + if provisioning_string and ':' in provisioning_string: + provisioning_string = provisioning_string.replace(' ', '') + provisioning_string = provisioning_string.split(';') + + for provisioning in provisioning_string: + pool, value = provisioning.split(':', 1) + + if pool not in self.pool_dict.keys(): + LOG.warning('Infortrend:provisioning ' + 'this setting %(pool)s:%(value)s, ' + 'pool [%(pool)s] not set in config.', { + 'pool': pool, + 'value': value}) + else: + if pool not in extraspecs_set.keys(): + extraspecs_set[pool] = {} + + if value.lower() in self.PROVISIONING_VALUES: + extraspecs_set[pool]['provisioning'] = value.lower() + else: + extraspecs_set[pool]['provisioning'] = 'Err:%s' % value + LOG.warning('Infortrend:provisioning ' + 'this setting %(pool)s:%(value)s, ' + '[%(value)s] is illegal', { + 'pool': pool, + 'value': value}) + # provisioning global setting + elif provisioning_string: + provisioning = provisioning_string.replace(' ', '').lower() + if provisioning in self.PROVISIONING_VALUES: + extraspecs_set['global_provisioning'] = provisioning + else: + extraspecs_set['global_provisioning'] = 'Err:%s' % provisioning + LOG.warning('Infortrend:provisioning ' + '[%(value)s] is illegal', { + 'value': provisioning_string}) + return extraspecs_set + + def _get_tiering_setting(self, extraspecs_set, tiering_string): + # tiering individual setting + if tiering_string and ':' in tiering_string: + tiering_string = tiering_string.replace(' ', '') + tiering_string = tiering_string.split(';') + + for tiering_set in tiering_string: + pool, value = tiering_set.split(':', 1) + + if pool not in self.pool_dict.keys(): + LOG.warning('Infortrend:tiering ' + 'this setting %(pool)s:%(value)s, ' + 'pool [%(pool)s] not set in config.', { + 'pool': pool, + 'value': value}) + else: + if pool not in extraspecs_set.keys(): + extraspecs_set[pool] = {} + + if value.lower() == 'all': + extraspecs_set[pool]['tiering'] = 'all' + else: + value = value.split(',') + value = [int(i) for i in value] + value = list(set(value)) + + if value[-1] in self.TIERING_VALUES: + extraspecs_set[pool]['tiering'] = value + else: + extraspecs_set[pool]['tiering'] = 'Err:%s' % value + LOG.warning('Infortrend:tiering ' + 'this setting %(pool)s:%(value)s, ' + '[%(err_value)s] is illegal', { + 'pool': pool, + 'value': value, + 'err_value': value[-1]}) + # tiering global setting + elif tiering_string: + tiering_set = tiering_string.replace(' ', '').lower() + + if tiering_set != 'all': + tiering_set = tiering_set.split(',') + tiering_set = [int(i) for i in tiering_set] + tiering_set = list(set(tiering_set)) + + if tiering_set[-1] in range(4): + extraspecs_set['global_tiering'] = tiering_set + else: + extraspecs_set['global_tiering'] = 'Err:%s' % tiering_set + LOG.warning('Infortrend:tiering ' + '[%(err_value)s] is illegal', { + 'err_value': tiering_set[-1]}) + return extraspecs_set + + def _find_pool_id_by_name(self, pool_name): + if pool_name in self.pool_dict.keys(): + return 
self.pool_dict[pool_name] + else: + msg = _('Pool [%(pool_name)s] not set in cinder conf.') % { + 'pool_name': pool_name} + LOG.error(msg) + raise exception.VolumeDriverException(data=msg) + + def _get_system_id(self, system_ip): + if not self.system_id: + rc, device_info = self._execute('ShowDevice') + for entry in device_info: + if system_ip == entry['Connected-IP']: + self.system_id = str(int(entry['ID'], 16)) + return self.system_id + + @log_func + def _get_lun_id(self, ch_id, controller='slot_a'): + lun_id = -1 + + if len(self.map_dict[controller][ch_id]) > 0: + lun_id = self.map_dict[controller][ch_id][0] + + if lun_id == -1: + msg = _('LUN number is out of bound ' + 'on channel id: %(ch_id)s.') % {'ch_id': ch_id} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + else: + return lun_id + + @log_func + def _get_mapping_info(self, multipath): + if multipath: + return self._get_mapping_info_with_mpio() + else: + return self._get_mapping_info_with_normal() + + def _get_mapping_info_with_mpio(self): + """Get all mapping channel id and minimun lun id mapping info. + + # R model with mcs + map_chl = { + 'slot_a': ['2', '0'] + 'slot_b': ['0', '3'] + } + map_lun = ['0'] + + # G model with mcs + map_chl = { + 'slot_a': ['1', '2'] + } + map_lun = ['0'] + + mcs_dict = { + 'slotX' = { + 'MCSID': ['chID', 'chID'] + } + } + + :returns: all mapping channel id per slot and minimun lun id + """ + map_chl = { + 'slot_a': [] + } + if self._model_type == 'R': + map_chl['slot_b'] = [] + + # MPIO: Map all the channels specified in conf file + # If MCS groups exist, only map to the minimum channel id per group + for controller in map_chl.keys(): + for mcs in self.mcs_dict[controller]: + map_mcs_chl = sorted((self.mcs_dict[controller][mcs]))[0] + map_chl[controller].append(map_mcs_chl) + + map_lun = self._get_minimum_common_lun_id(map_chl) + + if not map_lun: + msg = _('Cannot find a common lun id for mapping.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + return map_chl, map_lun + + def _get_minimum_common_lun_id(self, channel_dict): + """Find the minimun common lun id in all channels.""" + map_lun = [] + # search for free lun id on all channels + for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): + lun_id_is_used = False + for controller in channel_dict.keys(): + for channel_id in channel_dict[controller]: + if lun_id not in self.map_dict[controller][channel_id]: + lun_id_is_used = True + if not lun_id_is_used: + map_lun.append(str(lun_id)) + break + # check lun id overflow + elif (lun_id == self.constants['MAX_LUN_MAP_PER_CHL'] - 1): + msg = _('LUN map has reached maximum value [%(max_lun)s].') % { + 'max_lun': self.constants['MAX_LUN_MAP_PER_CHL']} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + return map_lun + + @log_func + def _get_mapping_info_with_normal(self): + """Get the minimun mapping channel id and lun id mapping info. + + # G model and R model + map_chl = { + 'slot_a': ['1'] + } + map_lun = ['0'] + + :returns: minimun mapping channel id per slot and lun id + """ + map_chl = { + 'slot_a': [] + } + map_lun = [] + + ret_chl = self._get_minimun_mapping_channel_id('slot_a') + lun_id = self._get_lun_id(ret_chl, 'slot_a') + + map_chl['slot_a'].append(ret_chl) + map_lun.append(str(lun_id)) + + return map_chl, map_lun + + @log_func + def _get_minimun_mapping_channel_id(self, controller): + empty_lun_num = 0 + min_map_chl = -1 + + # Sort items to get a reliable behaviour. 
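_get_minimum_common_lun_id() above scans LUN ids from 0 upward and keeps the first id that is free on every channel that will be mapped. A reduced, standalone sketch with fabricated free-LUN lists:

    MAX_LUN = 4
    # Free LUN ids per channel, e.g. after _update_map_info_by_slot has run.
    map_dict = {
        'slot_a': {'0': [1, 2, 3], '2': [0, 1, 3]},
        'slot_b': {'1': [1, 3]},
    }
    channel_dict = {'slot_a': ['0', '2'], 'slot_b': ['1']}

    def minimum_common_lun(channel_dict, map_dict, max_lun):
        for lun_id in range(max_lun):
            if all(lun_id in map_dict[ctrl][ch]
                   for ctrl in channel_dict
                   for ch in channel_dict[ctrl]):
                return [str(lun_id)]
        return []

    print(minimum_common_lun(channel_dict, map_dict, MAX_LUN))  # ['1']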
Dictionary items + # are iterated in a random order because of hash randomization. + # We don't care MCS group here, single path working as well. + for mcs in sorted(self.mcs_dict[controller].keys()): + mcs_chl = sorted((self.mcs_dict[controller][mcs]))[0] + free_lun_num = len(self.map_dict[controller][mcs_chl]) + if empty_lun_num < free_lun_num: + min_map_chl = mcs_chl + empty_lun_num = free_lun_num + + if int(min_map_chl) < 0: + msg = _('LUN map overflow on every channel.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + else: + return min_map_chl + + def _get_common_lun_map_id(self, wwpn_channel_info): + map_lun = None + # search for free lun id on all channels + for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): + lun_id_is_used = False + for slot_name in ['slot_a', 'slot_b']: + for wwpn in wwpn_channel_info: + channel_id = wwpn_channel_info[wwpn]['channel'] + if channel_id not in self.map_dict[slot_name]: + continue + elif lun_id not in self.map_dict[slot_name][channel_id]: + lun_id_is_used = True + if not lun_id_is_used: + map_lun = lun_id + break + # check lun id overflow + elif (lun_id == self.constants['MAX_LUN_MAP_PER_CHL'] - 1): + msg = _('LUN map has reached maximum value [%(max_lun)s].') % { + 'max_lun': self.constants['MAX_LUN_MAP_PER_CHL']} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + return map_lun + + def _get_mcs_id(self, channel_id, controller): + mcs_id = None + + for mcs in self.mcs_dict[controller]: + if channel_id in self.mcs_dict[controller][mcs]: + mcs_id = mcs + break + + if mcs_id is None: + msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % { + 'channel_id': channel_id} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + return mcs_id + + def _concat_provider_location(self, model_dict): + keys = sorted(model_dict.keys()) + return '@'.join([i + '^' + str(model_dict[i]) for i in keys]) + + def delete_volume(self, volume): + """Delete the specific volume.""" + + if not volume['provider_location']: + LOG.warning('Volume %(volume_name)s ' + 'provider location not stored.', { + 'volume_name': volume['name']}) + return + + have_map = False + + part_id = self._extract_specific_provider_location( + volume['provider_location'], 'partition_id') + + (check_exist, have_map, part_id) = ( + self._check_volume_exist(volume['id'], part_id) + ) + + if not check_exist: + LOG.warning('Volume %(volume_id)s already deleted.', { + 'volume_id': volume['id']}) + return + + if have_map: + self._execute('DeleteMap', 'part', part_id, '-y') + + self._execute('DeletePartition', part_id, '-y') + + LOG.info('Delete Volume %(volume_id)s completed.', { + 'volume_id': volume['id']}) + + def _check_replica_completed(self, replica): + if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or + (replica['Type'] == 'Mirror' and + replica['Status'] == 'Mirror')): + return True + # show the progress percentage + status = replica['Progress'].lower() + LOG.info('Replica from %(source_type)s: [%(source_name)s] ' + 'progess [%(progess)s].', { + 'source_type': replica['Source-Type'], + 'source_name': replica['Source-Name'], + 'progess': status}) + return False + + def _check_volume_exist(self, volume_id, part_id): + check_exist = False + have_map = False + + rc, part_list = self._execute('ShowPartition', '-l') + + if part_id: + key = 'ID' + find_key = part_id + else: + key = 'Name' + find_key = volume_id + + for entry in part_list: + if entry[key] == find_key: + check_exist = True + if 
entry['Mapped'] == 'true': + have_map = True + if not part_id: + part_id = entry['ID'] + break + + if check_exist: + return (check_exist, have_map, part_id) + else: + return (False, False, None) + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the volume by volume copy.""" + + # Step1 create a snapshot of the volume + src_part_id = self._extract_specific_provider_location( + src_vref['provider_location'], 'partition_id') + + if src_part_id is None: + src_part_id = self._get_part_id(volume['id']) + + model_update = self._create_volume_from_volume(volume, src_part_id) + + LOG.info('Create Cloned Volume %(volume_id)s completed.', { + 'volume_id': volume['id']}) + return model_update + + def _create_volume_from_volume(self, dst_volume, src_part_id): + # create the target volume for volume copy + self._create_partition_by_default(dst_volume) + + dst_part_id = self._get_part_id(dst_volume['id']) + # prepare return value + system_id = self._get_system_id(self.ip) + model_dict = { + 'system_id': system_id, + 'partition_id': dst_part_id, + } + + model_info = self._concat_provider_location(model_dict) + model_update = {"provider_location": model_info} + + # clone the volume from the origin partition + commands = ( + 'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id + ) + self._execute('CreateReplica', *commands) + self._wait_replica_complete(dst_part_id) + + return model_update + + def _extract_specific_provider_location(self, provider_location, key): + if not provider_location: + msg = _('Failed to get provider location.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + provider_location_dict = self._extract_all_provider_location( + provider_location) + + result = provider_location_dict.get(key, None) + return result + + @log_func + def _extract_all_provider_location(self, provider_location): + provider_location_dict = {} + dict_entry = provider_location.split("@") + for entry in dict_entry: + key, value = entry.split('^', 1) + if value == 'None': + value = None + provider_location_dict[key] = value + + return provider_location_dict + + def create_export(self, context, volume): + model_update = volume['provider_location'] + + LOG.info('Create export done from Volume %(volume_id)s.', { + 'volume_id': volume['id']}) + + return {'provider_location': model_update} + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If refresh is True, update the status first. + """ + if self._volume_stats is None or refresh: + self._update_volume_stats() + + LOG.info( + 'Successfully update volume stats. ' + 'backend: %(volume_backend_name)s, ' + 'vendor: %(vendor_name)s, ' + 'model_type: %(model_type)s, ' + 'system_id: %(system_id)s, ' + 'status: %(status)s, ' + 'driver_version: %(driver_version)s, ' + 'storage_protocol: %(storage_protocol)s.', self._volume_stats) + + return self._volume_stats + + def _update_volume_stats(self): + # Ensure the CLI is connected. 
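+        # CheckConnection re-opens the raidcmd session if it has dropped;
+        # the resulting status string is reported in the backend stats below.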
+        status = self._check_connection()
+
+        # Refresh cache
+        rc, out = self._execute('InitCache')
+        if rc != 0:
+            LOG.warning('[InitCache Failed]')
+
+        self.backend_name = self.configuration.safe_get('volume_backend_name')
+        system_id = self._get_system_id(self.ip)
+        data = {
+            'volume_backend_name': self.backend_name,
+            'vendor_name': 'Infortrend',
+            'driver_version': self.VERSION,
+            'storage_protocol': self.protocol,
+            'model_type': self._model_type,
+            'system_id': system_id,
+            'status': status,
+            'pools': self._update_pools_stats(system_id),
+        }
+        self._volume_stats = data
+
+    def _check_connection(self):
+        rc, out = self._execute('CheckConnection')
+        if rc == 0:
+            return 'Connected'
+        elif rc in (9, 13):
+            self._init_raid_connection()
+            self._set_raidcmd()
+            return 'Reconnected'
+        else:
+            return 'Error: %s' % out
+
+    def _update_pools_stats(self, system_id):
+        self._update_pool_tiers()
+        enable_specs_dict = self._get_enable_specs_on_array()
+
+        if 'Thin Provisioning' in enable_specs_dict.keys():
+            provisioning_support = True
+        else:
+            provisioning_support = False
+
+        rc, pools_info = self._execute('ShowLV')
+        pools = []
+
+        if provisioning_support:
+            rc, part_list = self._execute('ShowPartition')
+
+        for pool in pools_info:
+            if pool['Name'] in self.pool_dict.keys():
+                total_space = float(pool['Size'].split(' ', 1)[0])
+                available_space = float(pool['Available'].split(' ', 1)[0])
+
+                total_capacity_gb = round(mi_to_gi(total_space), 2)
+                free_capacity_gb = round(mi_to_gi(available_space), 2)
+
+                _pool = {
+                    'pool_name': pool['Name'],
+                    'pool_id': pool['ID'],
+                    'location_info': 'Infortrend:%s' % system_id,
+                    'total_capacity_gb': total_capacity_gb,
+                    'free_capacity_gb': free_capacity_gb,
+                    'reserved_percentage': 0,
+                    'QoS_support': False,
+                    'thick_provisioning_support': True,
+                    'thin_provisioning_support': provisioning_support,
+                }
+
+                if provisioning_support:
+                    provisioning_factor = self.configuration.safe_get(
+                        'max_over_subscription_ratio')
+                    provisioned_space = self._get_provisioned_space(
+                        pool['ID'], part_list)
+                    provisioned_capacity_gb = round(
+                        mi_to_gi(provisioned_space), 2)
+                    _pool['provisioned_capacity_gb'] = provisioned_capacity_gb
+                    _pool['max_over_subscription_ratio'] = float(
+                        provisioning_factor)
+
+                pools.append(_pool)
+
+        return pools
+
+    def _get_provisioned_space(self, pool_id, part_list):
+        provisioning_space = 0
+        for entry in part_list:
+            if entry['LV-ID'] == pool_id:
+                provisioning_space += int(entry['Size'])
+        return provisioning_space
+
+    def _update_pool_tiers(self):
+        """Setup the tier pools information.
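+
+        Built from the 'ShowLV tier' CLI output and used by later tiering
+        checks such as retype, for example: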
+ + tier_pools_dict = { + '12345678': [0, 1, 2, 3], # Pool 12345678 has 4 tiers: 0, 1, 2, 3 + '87654321': [0, 1, 3], # Pool 87654321 has 3 tiers: 0, 1, 3 + } + """ + rc, lv_info = self._execute('ShowLV', 'tier') + + temp_dict = {} + for entry in lv_info: + if entry['LV-Name'] in self.pool_dict.keys(): + if entry['LV-ID'] not in temp_dict.keys(): + temp_dict[entry['LV-ID']] = [] + temp_dict[entry['LV-ID']].append(int(entry['Tier'])) + + self.tier_pools_dict = temp_dict + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + + volume_id = snapshot['volume_id'] + + LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.', + {'snapshot': snapshot['id'], 'volume': volume_id}) + + model_update = {} + part_id = self._get_part_id(volume_id) + + if not part_id: + msg = _('Failed to get Partition ID for volume %(volume_id)s.') % { + 'volume_id': volume_id} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + @lockutils.synchronized( + 'snapshot-' + part_id, 'infortrend-', True) + def do_create_snapshot(): + self._execute('CreateSnapshot', 'part', part_id, + 'name=%s' % snapshot['id']) + rc, tmp_snapshot_list = self._execute( + 'ShowSnapshot', 'part=%s' % part_id) + return tmp_snapshot_list + + snapshot_list = do_create_snapshot() + + LOG.info( + 'Create success. ' + 'Snapshot: %(snapshot)s, ' + 'Snapshot ID in raid: %(raid_snapshot_id)s, ' + 'volume: %(volume)s.', { + 'snapshot': snapshot['id'], + 'raid_snapshot_id': snapshot_list[-1]['SI-ID'], + 'volume': volume_id}) + model_update['provider_location'] = snapshot_list[-1]['SI-ID'] + return model_update + + def delete_snapshot(self, snapshot): + """Delete the snapshot.""" + + volume_id = snapshot['volume_id'] + + LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.', + {'snapshot': snapshot['id'], 'volume': volume_id}) + + raid_snapshot_id = snapshot.get('provider_location') + + if raid_snapshot_id: + self._execute('DeleteSnapshot', raid_snapshot_id, '-y') + LOG.info('Delete Snapshot %(snapshot_id)s completed.', { + 'snapshot_id': snapshot['id']}) + else: + LOG.warning('Snapshot %(snapshot_id)s ' + 'provider_location not stored.', { + 'snapshot_id': snapshot['id']}) + + def _get_part_id(self, volume_id, pool_id=None): + count = 0 + while True: + if count == 2: + rc, part_list = self._execute('ShowPartition') + else: + rc, part_list = self._execute('ShowPartition') + + for entry in part_list: + if pool_id is None: + if entry['Name'] == volume_id: + return entry['ID'] + else: + if (entry['Name'] == volume_id and + entry['LV-ID'] == pool_id): + return entry['ID'] + + if count >= 3: + msg = _('Failed to get partition info ' + 'from volume_id: %(volume_id)s.') % { + 'volume_id': volume_id} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + time.sleep(4) + count = count + 1 + return + + def create_volume_from_snapshot(self, volume, snapshot): + + raid_snapshot_id = snapshot.get('provider_location') + if raid_snapshot_id is None: + msg = _('Failed to get Raid Snapshot ID ' + 'from snapshot: %(snapshot_id)s.') % { + 'snapshot_id': snapshot['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self._create_partition_by_default(volume) + dst_part_id = self._get_part_id(volume['id']) + + # clone the volume from the snapshot + commands = ( + 'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id + ) + self._execute('CreateReplica', *commands) + self._wait_replica_complete(dst_part_id) + + # prepare return value + system_id = 
self._get_system_id(self.ip) + model_dict = { + 'system_id': system_id, + 'partition_id': dst_part_id, + } + model_info = self._concat_provider_location(model_dict) + + LOG.info( + 'Create Volume %(volume_id)s from ' + 'snapshot %(snapshot_id)s completed.', { + 'volume_id': volume['id'], + 'snapshot_id': snapshot['id']}) + + return {"provider_location": model_info} + + def initialize_connection(self, volume, connector): + system_id = self._get_system_id(self.ip) + LOG.debug('Connector_info: %s', connector) + + @lockutils.synchronized( + '%s-connection' % system_id, 'infortrend-', True) + def lock_initialize_conn(): + if self.protocol == 'iSCSI': + multipath = connector.get('multipath', False) + return self._initialize_connection_iscsi( + volume, connector, multipath) + elif self.protocol == 'FC': + return self._initialize_connection_fc( + volume, connector) + else: + msg = _('Unknown protocol: %(protocol)s.') % { + 'protocol': self.protocol} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + return lock_initialize_conn() + + def _initialize_connection_fc(self, volume, connector): + self._init_map_info() + + map_lun, target_wwpns, initiator_target_map = ( + self._do_fc_connection(volume, connector) + ) + + properties = self._generate_fc_connection_properties( + map_lun, target_wwpns, initiator_target_map) + + LOG.info('Successfully initialized connection. ' + 'target_wwn: %(target_wwn)s, ' + 'initiator_target_map: %(initiator_target_map)s, ' + 'lun: %(target_lun)s.', properties['data']) + fczm_utils.add_fc_zone(properties) + return properties + + @log_func + def _do_fc_connection(self, volume, connector): + target_wwpns = [] + + partition_data = self._extract_all_provider_location( + volume['provider_location']) + part_id = partition_data['partition_id'] + + if part_id is None: + part_id = self._get_part_id(volume['id']) + + wwpn_list, wwpn_channel_info = self._get_wwpn_list() + + initiator_target_map, target_wwpns = self._build_initiator_target_map( + connector, wwpn_list) + + rc, part_mapping = self._execute('ShowMap', 'part=%s' % part_id) + + map_lun_list = [] + + # We need to check all the maps first + # Because fibre needs a consistent lun id + for initiator_wwpn in sorted(initiator_target_map): + for target_wwpn in initiator_target_map[initiator_wwpn]: + ch_id = wwpn_channel_info[target_wwpn.upper()]['channel'] + controller = wwpn_channel_info[target_wwpn.upper()]['slot'] + target_id = self.target_dict[controller][ch_id] + + exist_lun_id = self._check_map( + ch_id, target_id, part_mapping, initiator_wwpn) + map_lun_list.append(exist_lun_id) + + # To check if already mapped + if (map_lun_list.count(map_lun_list[0]) == len(map_lun_list) and + map_lun_list[0] != -1): + map_lun = map_lun_list[0] + LOG.info('Already has map. 
volume: [%(volume)s], ' + 'mapped_lun_list: %(list)s, ', { + 'volume': volume['id'], + 'list': map_lun_list}) + return map_lun, target_wwpns, initiator_target_map + + # Update used LUN list + self._update_map_info(True) + map_lun = self._get_common_lun_map_id(wwpn_channel_info) + save_lun = map_lun + while True: + ret = self._create_new_fc_maps( + initiator_wwpn, initiator_target_map, target_wwpn, + wwpn_channel_info, part_id, map_lun) + if ret == 20: + # Clean up the map for following re-create + self._delete_all_map(part_id) + map_lun = self._find_next_lun_id(map_lun, save_lun) + else: + break + + return map_lun, target_wwpns, initiator_target_map + + def _create_new_fc_maps(self, initiator_wwpn, initiator_target_map, + target_wwpn, wwpn_channel_info, part_id, map_lun): + for initiator_wwpn in sorted(initiator_target_map): + for target_wwpn in initiator_target_map[initiator_wwpn]: + ch_id = wwpn_channel_info[target_wwpn.upper()]['channel'] + controller = wwpn_channel_info[target_wwpn.upper()]['slot'] + target_id = self.target_dict[controller][ch_id] + host_filter = self._create_host_filter(initiator_wwpn) + commands = ( + 'part', part_id, ch_id, target_id, str(map_lun), + host_filter + ) + rc, out = self._execute('CreateMap', *commands) + if (rc == 20) or (rc == 1): + msg = _('Volume[%(part_id)s] LUN conflict detected,' + 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { + 'part_id': part_id, 'Ch': ch_id, + 'tid': target_id, 'lun': map_lun} + LOG.warning(msg) + return 20 + elif rc != 0: + msg = _('Volume[%(part_id)s] create map failed, ' + 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { + 'part_id': part_id, 'Ch': ch_id, + 'tid': target_id, 'lun': map_lun} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + if map_lun in self.map_dict[controller][ch_id]: + self.map_dict[controller][ch_id].remove(map_lun) + return rc + + def _build_initiator_target_map(self, connector, all_target_wwpns): + initiator_target_map = {} + target_wwpns = [] + + if self.fc_lookup_service: + lookup_map = ( + self.fc_lookup_service.get_device_mapping_from_network( + connector['wwpns'], all_target_wwpns) + ) + for fabric_name in lookup_map: + fabric = lookup_map[fabric_name] + target_wwpns.extend(fabric['target_port_wwn_list']) + for initiator in fabric['initiator_port_wwn_list']: + initiator_target_map[initiator] = ( + fabric['target_port_wwn_list'] + ) + else: + initiator_wwns = connector['wwpns'] + target_wwpns = all_target_wwpns + for initiator in initiator_wwns: + initiator_target_map[initiator] = all_target_wwpns + + return initiator_target_map, target_wwpns + + def _generate_fc_connection_properties( + self, lun_id, target_wwpns, initiator_target_map): + + return { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'target_lun': lun_id, + 'target_wwn': target_wwpns, + 'initiator_target_map': initiator_target_map, + }, + } + + def _find_next_lun_id(self, lun_id, save_id): + lun_id = lun_id + 1 + if lun_id == self.constants['MAX_LUN_MAP_PER_CHL']: + lun_id = 0 + elif lun_id == save_id: + msg = _('No available LUN among [%(max_lun)s] LUNs.' 
+ ) % {'max_lun': + self.constants['MAX_LUN_MAP_PER_CHL']} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + return lun_id + + @log_func + def _initialize_connection_iscsi(self, volume, connector, multipath): + self._init_map_info() + + partition_data = self._extract_all_provider_location( + volume['provider_location']) # system_id, part_id + + system_id = partition_data['system_id'] + part_id = partition_data['partition_id'] + if part_id is None: + part_id = self._get_part_id(volume['id']) + + self._set_host_iqn(connector['initiator']) + + iqns, ips, luns = self._iscsi_create_map( + part_id, multipath, connector['initiator'], system_id) + + properties = self._generate_iscsi_connection_properties( + iqns, ips, luns, volume, multipath) + LOG.info('Successfully initialized connection ' + 'with volume: %(volume_id)s.', properties['data']) + return properties + + def _set_host_iqn(self, host_iqn): + + rc, iqn_list = self._execute('ShowIQN') + + check_iqn_exist = False + for entry in iqn_list: + if entry['IQN'] == host_iqn: + check_iqn_exist = True + break + + if not check_iqn_exist: + self._execute( + 'CreateIQN', host_iqn, self._truncate_host_name(host_iqn)) + + def _truncate_host_name(self, iqn): + if len(iqn) > 16: + return iqn[-16:] + else: + return iqn + + @log_func + def _generate_iqn(self, channel_info): + slot_id = 1 if channel_info['controller'] == 'slot_a' else 2 + return self.iqn % ( + channel_info['system_id'], + channel_info['mcs_id'], + channel_info['target_id'], + slot_id) + + @log_func + def _get_ip_by_channel( + self, channel_id, net_list, controller='slot_a'): + + slot_name = 'slotA' if controller == 'slot_a' else 'slotB' + + for entry in net_list: + if entry['ID'] == channel_id and entry['Slot'] == slot_name: + if entry['IPv4'] == '0.0.0.0': + msg = _( + 'Please set ip on Channel[%(channel_id)s] ' + 'with controller[%(controller)s].') % { + 'channel_id': channel_id, 'controller': slot_name} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + else: + return entry['IPv4'] + + msg = _( + 'Can not find channel[%(channel_id)s] ' + 'with controller[%(controller)s].') % { + 'channel_id': channel_id, 'controller': slot_name} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + return + + def _get_wwpn_list(self): + rc, wwn_list = self._execute('ShowWWN') + + wwpn_list = [] + wwpn_channel_info = {} + + for entry in wwn_list: + channel_id = entry['CH'] + if 'BID' in entry['ID']: + slot_name = 'slot_b' + else: + slot_name = 'slot_a' + + if channel_id in self.map_dict[slot_name]: + wwpn_list.append(entry['WWPN']) + + wwpn_channel_info[entry['WWPN']] = { + 'channel': channel_id, + 'slot': slot_name, + } + + return wwpn_list, wwpn_channel_info + + @log_func + def _generate_iscsi_connection_properties( + self, iqns, ips, luns, volume, multipath): + + portals = [] + + for i in range(len(ips)): + discovery_ip = '%s:%s' % ( + ips[i], self.constants['ISCSI_PORT']) + discovery_iqn = iqns[i] + portals.append(discovery_ip) + + if not self._do_iscsi_discovery(discovery_iqn, discovery_ip): + msg = _( + 'Could not find iSCSI target ' + 'for volume: [%(volume_id)s] ' + 'portal: [%(discovery_ip)s] ' + 'iqn: [%(discovery_iqn)s]' + 'for path: [%(i)s/%(len)s]') % { + 'volume_id': volume['id'], + 'discovery_ip': discovery_ip, + 'discovery_iqn': discovery_iqn, + 'i': i + 1, 'len': len(ips)} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + properties = { + 'target_discovered': True, + 'target_iqn': iqns[0], + 
'target_portal': portals[0], + 'target_lun': luns[0], + 'volume_id': volume['id'], + } + + if multipath: + properties['target_iqns'] = iqns + properties['target_portals'] = portals + properties['target_luns'] = luns + + if 'provider_auth' in volume: + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + @log_func + def _do_iscsi_discovery(self, target_iqn, target_ip): + rc, out = self._execute( + 'ExecuteCommand', + 'iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', + target_ip, + run_as_root=True) + + if rc != 0: + LOG.error( + 'Can not discovery in %(target_ip)s with %(target_iqn)s.', { + 'target_ip': target_ip, 'target_iqn': target_iqn}) + return False + else: + for target in out.splitlines(): + if target_iqn in target and target_ip in target: + return True + return False + + def extend_volume(self, volume, new_size): + + part_id = self._extract_specific_provider_location( + volume['provider_location'], 'partition_id') + + if part_id is None: + part_id = self._get_part_id(volume['id']) + + expand_size = new_size - volume['size'] + + if '.' in ('%s' % expand_size): + expand_size = round(gi_to_mi(float(expand_size))) + expand_command = 'size=%sMB' % expand_size + else: + expand_command = 'size=%sGB' % expand_size + + self._execute('SetPartition', 'expand', part_id, expand_command) + + LOG.info( + 'Successfully extended volume %(volume_id)s to size %(size)s.', { + 'volume_id': volume['id'], 'size': new_size}) + + def terminate_connection(self, volume, connector): + system_id = self._get_system_id(self.ip) + + @lockutils.synchronized( + '%s-connection' % system_id, 'infortrend-', True) + def lock_terminate_conn(): + conn_info = None + + part_id = self._extract_specific_provider_location( + volume['provider_location'], 'partition_id') + + if part_id is None: + part_id = self._get_part_id(volume['id']) + + # Support for force detach volume + if not connector: + self._delete_all_map(part_id) + LOG.warning( + 'Connection Info Error: detach all connections ' + 'for volume: %(volume_id)s.', { + 'volume_id': volume['id']}) + return + + self._delete_host_map(part_id, connector) + + # Check if this iqn is none used + if self.protocol == 'iSCSI': + lun_map_exist = self._check_initiator_has_lun_map( + connector['initiator']) + if not lun_map_exist: + host_name = self._truncate_host_name( + connector['initiator']) + self._execute('DeleteIQN', host_name) + + # FC should return info + elif self.protocol == 'FC': + conn_info = {'driver_volume_type': 'fibre_channel', + 'data': {}} + + lun_map_exist = self._check_initiator_has_lun_map( + connector['wwpns']) + if not lun_map_exist: + wwpn_list, wwpn_channel_info = self._get_wwpn_list() + init_target_map, target_wwpns = ( + self._build_initiator_target_map(connector, wwpn_list) + ) + conn_info['data']['initiator_target_map'] = init_target_map + + LOG.info( + 'Successfully terminated connection ' + 'for volume: %(volume_id)s.', { + 'volume_id': volume['id']}) + + fczm_utils.remove_fc_zone(conn_info) + return conn_info + return lock_terminate_conn() + + def _delete_host_map(self, part_id, connector): + count = 0 + while True: + rc, part_map_info = self._execute('ShowMap', 'part=%s' % part_id) + if len(part_map_info) > 0: + break + elif count > 2: + # in case of noinit fails + rc, part_map_info = 
self._execute('ShowMap', + 'part=%s' % part_id) + break + else: + count = count + 1 + + if self.protocol == 'iSCSI': + host = connector['initiator'].lower() + host = (host,) + elif self.protocol == 'FC': + host = [x.lower() for x in connector['wwpns']] + + temp_ch = None + temp_tid = None + temp_lun = None + + # The default result of ShowMap is ordered by Ch-Target-LUN + # The same lun-map might have different host filters + # We need to specify Ch-Target-LUN and delete it only once + if len(part_map_info) > 0: + for entry in part_map_info: + if entry['Host-ID'].lower() in host: + if not (entry['Ch'] == temp_ch and + entry['Target'] == temp_tid and + entry['LUN'] == temp_lun): + self._execute( + 'DeleteMap', 'part', part_id, entry['Ch'], + entry['Target'], entry['LUN'], '-y') + temp_ch = entry['Ch'] + temp_tid = entry['Target'] + temp_lun = entry['LUN'] + return + + def _delete_all_map(self, part_id): + self._execute('DeleteMap', 'part', part_id, '-y') + return + + def migrate_volume(self, volume, host, new_extraspecs=None): + is_valid, dst_pool_id = ( + self._is_valid_for_storage_assisted_migration(host, volume) + ) + if not is_valid: + return (False, None) + + src_pool_id = self._get_volume_pool_id(volume) + + if src_pool_id != dst_pool_id: + + model_dict = self._migrate_volume_with_pool( + volume, dst_pool_id, new_extraspecs) + + model_update = { + "provider_location": + self._concat_provider_location(model_dict), + } + + LOG.info('Migrate Volume %(volume_id)s completed.', { + 'volume_id': volume['id']}) + else: + model_update = { + "provider_location": volume['provider_location'], + } + + return (True, model_update) + + def _is_valid_for_storage_assisted_migration(self, host, volume): + + if 'location_info' not in host['capabilities']: + LOG.error('location_info not stored in pool.') + return (False, None) + + vendor = host['capabilities']['location_info'].split(':')[0] + dst_system_id = host['capabilities']['location_info'].split(':')[-1] + + if vendor != 'Infortrend': + LOG.error('Vendor should be Infortrend for migration.') + return (False, None) + + # It should be the same raid for migration + src_system_id = self._get_system_id(self.ip) + if dst_system_id != src_system_id: + LOG.error('Migration must be performed ' + 'on the same Infortrend array.') + return (False, None) + + # We don't support volume live migration + if volume['status'].lower() != 'available': + LOG.error('Volume status must be available for migration.') + return (False, None) + + if 'pool_id' not in host['capabilities']: + LOG.error('Failed to get target pool id.') + return (False, None) + + dst_pool_id = host['capabilities']['pool_id'] + if dst_pool_id is None: + return (False, None) + + return (True, dst_pool_id) + + def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None): + # Get old partition data for delete map + partition_data = self._extract_all_provider_location( + volume['provider_location']) + + src_part_id = partition_data['partition_id'] + + if src_part_id is None: + src_part_id = self._get_part_id(volume['id']) + + # Create New Partition + self._create_partition_with_pool(volume, dst_pool_id, extraspecs) + + dst_part_id = self._get_part_id( + volume['id'], pool_id=dst_pool_id) + + if dst_part_id is None: + msg = _('Failed to get new part id in new pool: %(pool_id)s.') % { + 'pool_id': dst_pool_id} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + # Volume Mirror from old partition into new partition + commands = ( + 'Cinder-Migrate', 'part', src_part_id, 
'part', dst_part_id, + 'type=mirror' + ) + self._execute('CreateReplica', *commands) + + self._wait_replica_complete(dst_part_id) + + self._execute('DeleteMap', 'part', src_part_id, '-y') + self._execute('DeletePartition', src_part_id, '-y') + + model_dict = { + 'system_id': partition_data['system_id'], + 'partition_id': dst_part_id, + } + + return model_dict + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + """Return model update for migrated volume.""" + + src_volume_id = volume['id'] + dst_volume_id = new_volume['id'] + part_id = self._extract_specific_provider_location( + new_volume['provider_location'], 'partition_id') + + if part_id is None: + part_id = self._get_part_id(dst_volume_id) + + LOG.debug( + 'Rename partition %(part_id)s ' + 'into new volume %(new_volume)s.', { + 'part_id': part_id, 'new_volume': dst_volume_id}) + try: + self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) + except InfortrendCliException: + LOG.exception('Failed to rename %(new_volume)s into ' + '%(volume)s.', {'new_volume': new_volume['id'], + 'volume': volume['id']}) + return {'_name_id': new_volume['_name_id'] or new_volume['id']} + + LOG.info('Update migrated volume %(new_volume)s completed.', { + 'new_volume': new_volume['id']}) + + model_update = { + '_name_id': None, + 'provider_location': new_volume['provider_location'], + } + return model_update + + def _wait_replica_complete(self, part_id): + def _inner(): + check_done = False + try: + rc, replica_list = self._execute('ShowReplica', '-l') + for entry in replica_list: + if (entry['Target'] == part_id and + self._check_replica_completed(entry)): + check_done = True + self._execute('DeleteReplica', entry['Pair-ID'], '-y') + except Exception: + check_done = False + LOG.exception('Cannot detect replica status.') + + if check_done: + raise loopingcall.LoopingCallDone() + + timer = loopingcall.FixedIntervalLoopingCall(_inner) + timer.start(interval=15).wait() + + def _get_enable_specs_on_array(self): + enable_specs = {} + rc, license_list = self._execute('ShowLicense') + + for key, value in license_list.items(): + if value['Support']: + enable_specs[key] = value + + return enable_specs + + def manage_existing_get_size(self, volume, ref): + """Return size of volume to be managed by manage_existing.""" + + volume_data = self._get_existing_volume_ref_data(ref) + volume_pool_id = self._get_volume_pool_id(volume) + + if not volume_data: + msg = _('Specified volume does not exist.') + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=ref, reason=msg) + + if volume_data['Mapped'].lower() != 'false': + msg = _('The specified volume is mapped. 
' + 'Please unmap first for Openstack using.') + LOG.error(msg) + raise exception.VolumeDriverException(data=msg) + + if volume_data['LV-ID'] != volume_pool_id: + msg = _('The specified volume pool is wrong.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return int(math.ceil(mi_to_gi(float(volume_data['Size'])))) + + def manage_existing(self, volume, ref): + volume_data = self._get_existing_volume_ref_data(ref) + + if not volume_data: + msg = _('Specified logical volume does not exist.') + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=ref, reason=msg) + + self._execute( + 'SetPartition', volume_data['ID'], 'name=%s' % volume['id']) + + model_dict = { + 'system_id': self._get_system_id(self.ip), + 'partition_id': volume_data['ID'], + } + model_update = { + "provider_location": self._concat_provider_location(model_dict), + } + + LOG.info('Rename Volume %(volume_id)s completed.', { + 'volume_id': volume['id']}) + + return model_update + + def _get_existing_volume_ref_data(self, ref): + + if 'source-name' in ref: + key = 'Name' + find_key = ref['source-name'] + elif 'source-id' in ref: + key = 'ID' + find_key = ref['source-id'] + else: + msg = _('Reference must contain source-id or source-name.') + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=ref, reason=msg) + + ref_dict = {} + rc, part_list = self._execute('ShowPartition', '-l') + + for entry in part_list: + if entry[key] == find_key: + ref_dict = entry + break + + return ref_dict + + def unmanage(self, volume): + part_id = self._extract_specific_provider_location( + volume['provider_location'], 'partition_id') + + if part_id is None: + part_id = self._get_part_id(volume['id']) + + new_vol_name = self.unmanaged_prefix % volume['id'][:-17] + + self._execute('SetPartition', part_id, 'name=%s' % new_vol_name) + + LOG.info('Unmanage volume %(volume_id)s completed.', { + 'volume_id': volume['id']}) + + def _check_volume_attachment(self, volume): + if not volume['volume_attachment']: + return False + return True + + def _check_volume_has_snapshot(self, volume): + part_id = self._extract_specific_provider_location( + volume['provider_location'], 'partition_id') + + rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id) + + if len(snapshot_list) > 0: + return True + return False + + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to the new volume type.""" + src_pool_name = volume['host'].split('#')[-1] + dst_pool_name = host['host'].split('#')[-1] + + if src_pool_name != dst_pool_name: + if self._check_volume_attachment(volume): + LOG.error( + 'Volume %(volume_id)s cannot be retyped ' + 'during attachment.', { + 'volume_id': volume['id']}) + return False + + if self._check_volume_has_snapshot(volume): + LOG.error( + 'Volume %(volume_id)s cannot be retyped ' + 'because it has snapshot.', { + 'volume_id': volume['id']}) + return False + + new_extraspecs = new_type['extra_specs'] + rc, model_update = self.migrate_volume( + volume, host, new_extraspecs) + + if rc: + LOG.info( + 'Retype Volume %(volume_id)s is done ' + 'and migrated to pool %(pool_id)s.', { + 'volume_id': volume['id'], + 'pool_id': host['capabilities']['pool_id']}) + + return (rc, model_update) + else: + # extract extraspecs for pool + src_extraspec = new_type['extra_specs'].copy() + + if self.PROVISIONING_KEY in diff['extra_specs']: + src_prov = diff['extra_specs'][self.PROVISIONING_KEY][0] + src_extraspec[self.PROVISIONING_KEY] = src_prov + + 
if self.TIERING_SET_KEY in diff['extra_specs']: + src_tier = diff['extra_specs'][self.TIERING_SET_KEY][0] + src_extraspec[self.TIERING_SET_KEY] = src_tier + + if src_extraspec != new_type['extra_specs']: + src_extraspec_set = self._get_extraspecs_set( + src_extraspec) + new_extraspec_set = self._get_extraspecs_set( + new_type['extra_specs']) + + src_extraspecs = self._get_pool_extraspecs( + src_pool_name, src_extraspec_set) + new_extraspecs = self._get_pool_extraspecs( + dst_pool_name, new_extraspec_set) + + if not self._check_volume_type_diff( + src_extraspecs, new_extraspecs, 'provisioning'): + LOG.warning( + 'The provisioning: [%(src)s] to [%(new)s] ' + 'is unable to retype.', { + 'src': src_extraspecs['provisioning'], + 'new': new_extraspecs['provisioning']}) + return False + + elif not self._check_volume_type_diff( + src_extraspecs, new_extraspecs, 'tiering'): + self._execute_retype_tiering(new_extraspecs, volume) + + LOG.info('Retype Volume %(volume_id)s is completed.', { + 'volume_id': volume['id']}) + + return True + + def _check_volume_type_diff(self, src_extraspecs, new_extraspecs, key): + if src_extraspecs[key] != new_extraspecs[key]: + return False + return True + + def _execute_retype_tiering(self, new_pool_extraspecs, volume): + part_id = self._extract_specific_provider_location( + volume['provider_location'], 'partition_id') + + if part_id is None: + part_id = self._get_part_id(volume['id']) + + pool_name = volume['host'].split('#')[-1] + pool_id = self._get_volume_pool_id(volume) + provisioning = new_pool_extraspecs['provisioning'] + new_tiering = new_pool_extraspecs['tiering'] + + if not self._check_tier_pool_or_not(pool_id): + return + + pool_tiers = self.tier_pools_dict[pool_id] + + if new_tiering == 'all': + if provisioning == 'thin': + tiering = ','.join(str(i) for i in pool_tiers) + else: + volume_size = gi_to_mi(volume['size']) + self._check_tier_space(pool_tiers[0], pool_id, volume_size) + tiering = str(pool_tiers[0]) + else: + if not self._check_pool_tiering(pool_tiers, new_tiering): + msg = _('Tiering extraspecs %(pool_name)s:%(tiering)s ' + 'can not fit in the real tiers %(pool_tier)s.') % { + 'pool_name': pool_name, + 'tiering': new_tiering, + 'pool_tier': pool_tiers} + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + if provisioning == 'thin': + tiering = ','.join(str(i) for i in new_tiering) + else: + volume_size = gi_to_mi(volume['size']) + self._check_tier_space(new_tiering[0], pool_id, volume_size) + tiering = str(new_tiering[0]) + + rc, out = self._execute( + 'SetPartition', 'tier-resided', part_id, 'tier=%s' % tiering) + rc, out = self._execute( + 'SetLV', 'tier-migrate', pool_id, 'part=%s' % part_id) + self._wait_tier_migrate_complete(part_id) + + def _wait_tier_migrate_complete(self, part_id): + def _inner(): + check_done = False + try: + rc, part_list = self._execute('ShowPartition', '-l') + for entry in part_list: + if (entry['ID'] == part_id and + self._check_tier_migrate_completed(entry)): + check_done = True + except Exception: + check_done = False + LOG.exception('Cannot detect tier migrate status.') + + if check_done: + raise loopingcall.LoopingCallDone() + + timer = loopingcall.FixedIntervalLoopingCall(_inner) + timer.start(interval=15).wait() + + def _check_tier_migrate_completed(self, part_info): + status = part_info['Progress'].lower() + if 'migrating' in status: + LOG.info('Retype volume [%(volume_name)s] ' + 'progess [%(progess)s].', { + 'volume_name': part_info['Name'], + 'progess': status}) + return False + return 
True + + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + """List volumes on the backend available for management by Cinder.""" + + manageable_volumes = [] # List to Return + cinder_ids = [cinder_volume.id for cinder_volume in cinder_volumes] + + rc, part_list = self._execute('ShowPartition', '-l') + + for entry in part_list: + # Check if parts are located within right LVs config. + pool_name = None + for _name, _id in self.pool_dict.items(): + if _id == entry['LV-ID']: + pool_name = _name + break + + if not pool_name: + continue + + if entry['Name'] in cinder_ids: + safety = False + reason = 'Already Managed' + cinder_id = entry['Name'] + elif entry['Mapped'].lower() != 'false': + safety = False + reason = 'Volume In-use' + cinder_id = None + else: + safety = True + reason = None + cinder_id = None + + volume = { + 'reference': { + 'source-id': entry['ID'], + 'source-name': entry['Name'], + 'pool-name': pool_name + }, + 'size': int(round(mi_to_gi(float(entry['Size'])))), + 'safe_to_manage': safety, + 'reason_not_safe': reason, + 'cinder_id': cinder_id, + 'extra_info': None + } + manageable_volumes.append(volume) + + return utils.paginate_entries_list(manageable_volumes, marker, limit, + offset, sort_keys, sort_dirs) + + def manage_existing_snapshot(self, snapshot, existing_ref): + """Brings existing backend storage object under Cinder management.""" + + si = self._get_snapshot_ref_data(existing_ref) + + self._execute('SetSnapshot', si['SI-ID'], 'name=%s' % snapshot.id) + + LOG.info('Rename Snapshot %(si_id)s completed.', { + 'si_id': si['SI-ID']}) + + return {'provider_location': si['SI-ID']} + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Return size of snapshot to be managed by manage_existing.""" + + si = self._get_snapshot_ref_data(existing_ref) + + rc, part_list = self._execute('ShowPartition') + volume_id = si['Partition-ID'] + + for entry in part_list: + if entry['ID'] == volume_id: + part = entry + break + + return int(math.ceil(mi_to_gi(float(part['Size'])))) + + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, + sort_keys, sort_dirs): + """List snapshots on the backend available for management by Cinder.""" + + manageable_snapshots = [] # List to Return + cinder_si_ids = [cinder_si.id for cinder_si in cinder_snapshots] + + rc, si_list = self._execute('ShowSnapshot', '-l') + rc, part_list = self._execute('ShowPartition', '-l') + + for entry in si_list: + # Check if parts are located within right LVs config. 
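+            # Snapshots whose LV-ID does not match one of the configured
+            # pools are skipped below.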
+ pool_name = None + for _name, _id in self.pool_dict.items(): + if _id == entry['LV-ID']: + pool_name = _name + break + + if not pool_name: + continue + + # Find si's partition + for part_entry in part_list: + if part_entry['ID'] == entry['Partition-ID']: + part = part_entry + break + + if entry['Name'] in cinder_si_ids: + safety = False + reason = 'Already Managed' + cinder_id = entry['Name'] + elif part['Mapped'].lower() != 'false': + safety = False + reason = 'Volume In-use' + cinder_id = None + else: + safety = True + reason = None + cinder_id = None + + return_si = { + 'reference': { + 'source-id': entry['ID'], + 'source-name': entry['Name'] + }, + 'size': int(round(mi_to_gi(float(part['Size'])))), + 'safe_to_manage': safety, + 'reason_not_safe': reason, + 'cinder_id': cinder_id, + 'extra_info': None, + 'source_reference': { + 'volume-id': part['Name'] + } + } + + manageable_snapshots.append(return_si) + + return utils.paginate_entries_list(manageable_snapshots, marker, limit, + offset, sort_keys, sort_dirs) + + def unmanage_snapshot(self, snapshot): + """Removes the specified snapshot from Cinder management.""" + + si_id = snapshot.provider_location + if si_id is None: + msg = _('Failed to get snapshot provider location.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self._execute('SetSnapshot', si_id, + 'name=cinder-unmanaged-%s' % snapshot.id[:-17]) + + LOG.info('Unmanaging Snapshot %(si_id)s is completed.', { + 'si_id': snapshot.id}) + return + + def _get_snapshot_ref_data(self, ref): + """Check the existance of SI for the specified partition.""" + + if 'source-name' in ref: + key = 'Name' + content = ref['source-name'] + if ref['source-name'] == '---': + LOG.warning( + 'Finding snapshot with default name "---" ' + 'can cause ambiguity.' + ) + elif 'source-id' in ref: + key = 'SI-ID' + content = ref['source-id'] + else: + msg = _('Reference must contain source-id or source-name.') + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=ref, reason=msg) + + rc, si_list = self._execute('ShowSnapshot') + si_data = {} + for entry in si_list: + if entry[key] == content: + si_data = entry + break + + if not si_data: + msg = _('Specified snapshot does not exist %(key)s: %(content)s.' + ) % {'key': key, 'content': content} + LOG.error(msg) + raise exception.ManageExistingInvalidReference( + existing_ref=ref, reason=msg) + + return si_data diff --git a/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst b/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst new file mode 100644 index 00000000000..c9bc891aafd --- /dev/null +++ b/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst @@ -0,0 +1,143 @@ +======================== +Infortrend volume driver +======================== + +The `Infortrend `__ volume driver is a Block Storage driver +providing iSCSI and Fibre Channel support for Infortrend storages. + +Supported operations +~~~~~~~~~~~~~~~~~~~~ + +The Infortrend volume driver supports the following volume operations: + +* Create, delete, attach, and detach volumes. + +* Create and delete a snapshot. + +* Create a volume from a snapshot. + +* Copy an image to a volume. + +* Copy a volume to an image. + +* Clone a volume. + +* Extend a volume + +* Retype a volume. + +* Manage and unmanage a volume. + +* Migrate a volume with back-end assistance. + +* Live migrate an instance with volumes hosted on an Infortrend backend. 
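+
+As an illustrative example (the volume type name is arbitrary, and the
+backend name matches the configuration samples shown later in this
+document), a thin-provisioned volume type for this driver could be set up
+as follows:
+
+.. code-block:: console
+
+   $ openstack volume type create IFT-THIN
+   $ openstack volume type set --property volume_backend_name=IFT-ISCSI \
+     --property infortrend:provisioning=thin IFT-THIN
+   $ openstack volume create --size 10 --type IFT-THIN demo-volume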
+
+System requirements
+~~~~~~~~~~~~~~~~~~~
+
+To use the Infortrend volume driver, the following settings are required:
+
+Set up Infortrend storage
+-------------------------
+
+* Create logical volumes in advance.
+
+* Host side setting ``Peripheral device type`` should be
+  ``No Device Present (Type=0x7f)``.
+
+Set up cinder-volume node
+-------------------------
+
+* Install JRE 7 or later.
+
+* Download the Infortrend storage CLI from the
+  `release page `__.
+  Choose the ``raidcmd_ESDS10.jar`` file,
+  which is available under v2.1.3 on the GitHub releases page,
+  and place it in the default path ``/opt/bin/Infortrend/``.
+
+Driver configuration
+~~~~~~~~~~~~~~~~~~~~
+
+On ``cinder-volume`` nodes, set the following in
+``/etc/cinder/cinder.conf``, using the options below to configure the driver:
+
+Driver options
+--------------
+
+.. include:: ../../tables/cinder-infortrend.inc
+
+iSCSI configuration example
+---------------------------
+
+.. code-block:: ini
+
+   [DEFAULT]
+   default_volume_type = IFT-ISCSI
+   enabled_backends = IFT-ISCSI
+
+   [IFT-ISCSI]
+   volume_driver = cinder.volume.drivers.infortrend.infortrend_iscsi_cli.InfortrendCLIISCSIDriver
+   volume_backend_name = IFT-ISCSI
+   infortrend_pools_name = POOL-1,POOL-2
+   san_ip = MANAGEMENT_PORT_IP
+   infortrend_slots_a_channels_id = 0,1,2,3
+   infortrend_slots_b_channels_id = 0,1,2,3
+
+Fibre Channel configuration example
+-----------------------------------
+
+.. code-block:: ini
+
+   [DEFAULT]
+   default_volume_type = IFT-FC
+   enabled_backends = IFT-FC
+
+   [IFT-FC]
+   volume_driver = cinder.volume.drivers.infortrend.infortrend_fc_cli.InfortrendCLIFCDriver
+   volume_backend_name = IFT-FC
+   infortrend_pools_name = POOL-1,POOL-2,POOL-3
+   san_ip = MANAGEMENT_PORT_IP
+   infortrend_slots_a_channels_id = 4,5
+
+Multipath configuration
+-----------------------
+
+* Enable multipath for image transfer in ``/etc/cinder/cinder.conf``.
+
+  .. code-block:: ini
+
+     use_multipath_for_image_xfer = True
+
+  Restart the ``cinder-volume`` service.
+
+* Enable multipath for volume attach and detach in ``/etc/nova/nova.conf``.
+
+  .. code-block:: ini
+
+     [libvirt]
+     ...
+     volume_use_multipath = True
+     ...
+
+  Restart the ``nova-compute`` service.
+
+Extra spec usage
+----------------
+
+* ``infortrend:provisioning`` - Defaults to ``full`` provisioning;
+  the valid values are ``thin`` and ``full``.
+
+* ``infortrend:tiering`` - Defaults to ``all`` tiering;
+  the valid values are subsets of 0, 1, 2, 3.
+
+  If multiple pools are configured in ``cinder.conf``,
+  the value can be specified per pool, separated by semicolons.
+
+  For example:
+
+  ``infortrend:provisioning``: ``POOL-1:thin; POOL-2:full``
+
+  ``infortrend:tiering``: ``POOL-1:all; POOL-2:0; POOL-3:0,1,3``
+
+For more details, see `Infortrend documents `_.
diff --git a/doc/source/configuration/tables/cinder-infortrend.inc b/doc/source/configuration/tables/cinder-infortrend.inc
new file mode 100644
index 00000000000..689d798c560
--- /dev/null
+++ b/doc/source/configuration/tables/cinder-infortrend.inc
@@ -0,0 +1,38 @@
+..
+  Warning: Do not edit this file. It is automatically generated from the
+  software project's code and your changes will be overwritten.
+
+  The tool to generate this file lives in openstack-doc-tools repository.
+
+  Please make any changes needed in the code, then run the
+  autogenerate-config-doc tool from the openstack-doc-tools repository, or
+  ask for help on the documentation mailing list, IRC channel or meeting.
+
+.. _cinder-infortrend:
+
+.. list-table:: Description of Infortrend volume driver configuration options
+   :header-rows: 1
+   :class: config-ref-table
+
+   * - Configuration option = Default value
+     - Description
+   * - **[DEFAULT]**
+     -
+   * - ``infortrend_cli_max_retries`` = ``5``
+     - (Integer) The maximum retry times if a command fails.
+   * - ``infortrend_cli_path`` = ``/opt/bin/Infortrend/raidcmd_ESDS10.jar``
+     - (String) The Infortrend CLI absolute path.
+   * - ``infortrend_cli_timeout`` = ``60``
+     - (Integer) The timeout for CLI in seconds.
+   * - ``infortrend_cli_cache`` = ``False``
+     - (Boolean) The Infortrend CLI cache. Make sure the array is only managed by OpenStack, and it is only used by one cinder-volume node. Otherwise, never enable it: the cached data may become inconsistent if any other operations are performed on the array.
+   * - ``infortrend_pools_name`` = ``None``
+     - (String) The Infortrend logical volume name list, separated by commas.
+   * - ``infortrend_iqn_prefix`` = ``iqn.2002-10.com.infortrend``
+     - (String) The Infortrend IQN prefix for iSCSI.
+   * - ``infortrend_slots_a_channels_id`` = ``None``
+     - (String) Infortrend RAID channel ID list on Slot A for OpenStack usage, separated by commas.
+   * - ``infortrend_slots_b_channels_id`` = ``None``
+     - (String) Infortrend RAID channel ID list on Slot B for OpenStack usage, separated by commas.
+   * - ``java_path`` = ``/usr/bin/java``
+     - (String) The Java absolute path.
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index e755eb8c83c..75fc71a9d4f 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -102,6 +109,9 @@ title=IBM XIV Storage Driver (iSCSI, FC)
 
 [driver.infinidat]
 title=Infinidat Storage Driver (iSCSI, FC)
 
+[driver.infortrend]
+title=Infortrend Storage Driver (iSCSI, FC)
+
 [driver.inspur]
 title=Inspur G2 Storage Driver (iSCSI, FC)
@@ -225,6 +228,7 @@ driver.ibm_flashsystem=complete
 driver.ibm_gpfs=complete
 driver.ibm_storwize=complete
 driver.ibm_xiv=complete
+driver.infortrend=complete
 driver.inspur=complete
 driver.inspur_as13000=complete
 driver.kaminario=complete
@@ -288,6 +292,7 @@ driver.ibm_flashsystem=complete
 driver.ibm_gpfs=complete
 driver.ibm_storwize=complete
 driver.ibm_xiv=complete
+driver.infortrend=complete
 driver.inspur=complete
 driver.inspur_as13000=complete
 driver.kaminario=complete
@@ -351,6 +356,7 @@ driver.ibm_flashsystem=missing
 driver.ibm_gpfs=missing
 driver.ibm_storwize=complete
 driver.ibm_xiv=missing
+driver.infortrend=missing
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=missing
@@ -417,6 +423,7 @@ driver.ibm_flashsystem=missing
 driver.ibm_gpfs=missing
 driver.ibm_storwize=complete
 driver.ibm_xiv=missing
+driver.infortrend=missing
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=missing
@@ -482,6 +489,7 @@ driver.ibm_flashsystem=missing
 driver.ibm_gpfs=missing
 driver.ibm_storwize=complete
 driver.ibm_xiv=complete
+driver.infortrend=complete
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=complete
@@ -548,6 +556,7 @@ driver.ibm_flashsystem=missing
 driver.ibm_gpfs=missing
 driver.ibm_storwize=complete
 driver.ibm_xiv=complete
+driver.infortrend=missing
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=missing
@@ -613,6 +622,7 @@ driver.ibm_flashsystem=missing
 driver.ibm_gpfs=missing
 driver.ibm_storwize=missing
 driver.ibm_xiv=missing
+driver.infortrend=complete
 driver.inspur=missing
 driver.inspur_as13000=complete
 driver.kaminario=complete
@@ -679,6 +689,7 @@
driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=missing driver.ibm_xiv=missing +driver.infortrend=complete driver.inspur=missing driver.inspur_as13000=missing driver.kaminario=missing @@ -745,6 +756,7 @@ driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=complete +driver.infortrend=complete driver.inspur=missing driver.inspur_as13000=complete driver.kaminario=missing @@ -808,6 +820,7 @@ driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=missing +driver.infortrend=missing driver.inspur=missing driver.inspur_as13000=missing driver.kaminario=missing @@ -875,6 +888,7 @@ driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=missing driver.ibm_xiv=missing +driver.infortrend=missing driver.inspur=missing driver.inspur_as13000=missing driver.kaminario=missing diff --git a/releasenotes/notes/readd-infortrend-driver-d9b399b53a4355f8.yaml b/releasenotes/notes/readd-infortrend-driver-d9b399b53a4355f8.yaml new file mode 100644 index 00000000000..22b79f4e7a0 --- /dev/null +++ b/releasenotes/notes/readd-infortrend-driver-d9b399b53a4355f8.yaml @@ -0,0 +1,5 @@ +--- +features: + - Re-added Infortrend Cinder volume driver. The Infortrend driver, + removed in Cinder 12.0.0 (Queens), has been restored in this release. +