diff --git a/cinder/opts.py b/cinder/opts.py
index 2d2fa246b37..f0274559fd4 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -176,7 +176,8 @@ from cinder.volume.drivers.windows import iscsi as \
cinder_volume_drivers_windows_iscsi
from cinder.volume.drivers.windows import smbfs as \
cinder_volume_drivers_windows_smbfs
-from cinder.volume.drivers import zadara as cinder_volume_drivers_zadara
+from cinder.volume.drivers.zadara import zadara as \
+ cinder_volume_drivers_zadara_zadara
from cinder.volume import manager as cinder_volume_manager
from cinder.volume.targets import spdknvmf as cinder_volume_targets_spdknvmf
from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver
@@ -392,7 +393,7 @@ def list_opts():
cinder_volume_drivers_vzstorage.vzstorage_opts,
cinder_volume_drivers_windows_iscsi.windows_opts,
cinder_volume_drivers_windows_smbfs.volume_opts,
- cinder_volume_drivers_zadara.zadara_opts,
+ cinder_volume_drivers_zadara_zadara.common.zadara_opts,
cinder_volume_manager.volume_backend_opts,
cinder_volume_targets_spdknvmf.spdk_opts,
)),
diff --git a/cinder/tests/unit/volume/drivers/test_zadara.py b/cinder/tests/unit/volume/drivers/test_zadara.py
index a557059bc12..577f7109a75 100644
--- a/cinder/tests/unit/volume/drivers/test_zadara.py
+++ b/cinder/tests/unit/volume/drivers/test_zadara.py
@@ -22,9 +22,13 @@ import requests
from six.moves.urllib import parse
from cinder import exception
+from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.volume import configuration as conf
-from cinder.volume.drivers import zadara
+from cinder.volume.drivers.zadara import common
+from cinder.volume.drivers.zadara import exception as zadara_exception
+from cinder.volume.drivers.zadara import zadara
def check_access_key(func):
@@ -47,47 +51,62 @@ DEFAULT_RUNTIME_VARS = {
'controllers': [('active_ctrl', {'display-name': 'test_ctrl'})],
'counter': 1000,
- 'login': """
-
-
- 2012-04-30...
- %s
- 1
- 2012-02-21...
- jsmith@example.com
- jsmith
-
- 0
- """,
-
- 'good': """
-
- 0
- """,
-
- 'bad_login': """
-
- 5
- Some message...
- """,
-
- 'bad_volume': """
-
- 10081
- Virtual volume xxx not found
- """,
-
- 'bad_server': """
-
- 10086
- Server xxx not found
- """,
-
- 'server_created': """
-
- %s
- 0
- """,
+ "login": """
+ {
+ "response": {
+ "user": {
+ "updated-at": "2021-01-22",
+ "access-key": "%s",
+ "id": 1,
+ "created-at": "2021-01-22",
+ "email": "jsmith@example.com",
+ "username": "jsmith"
+ },
+ "status": 0
+ }
+ }""",
+ "good": """
+ {
+ "response": {
+ "status": 0
+ }
+ }""",
+ "bad_login": """
+ {
+ "response": {
+ "status": 5,
+ "status-msg": "Some message..."
+ }
+ }""",
+ "bad_volume": """
+ {
+ "response": {
+ "status": 10081,
+                "status-msg": "Virtual volume xxx not found"
+ }
+ }""",
+ "fake_volume": """
+ {
+ "response": {
+ "volumes": [],
+ "status": 0,
+ "status-msg": "Virtual volume xxx doesn't exist"
+ }
+ }""",
+ "bad_server": """
+ {
+ "response": {
+ "status": 10086,
+ "status-msg": "Server xxx not found"
+ }
+ }""",
+ "server_created": """
+ {
+ "response": {
+ "server_name": "%s",
+ "status": 0
+ }
+ }""",
}
RUNTIME_VARS = None
@@ -109,25 +128,32 @@ class FakeResponse(object):
return self.headers["X-Access-Key"]
def read(self):
- ops = {'POST': [('/api/users/login.xml', self._login),
- ('/api/volumes.xml', self._create_volume),
- ('/api/servers.xml', self._create_server),
- ('/api/servers/*/volumes.xml', self._attach),
- ('/api/volumes/*/detach.xml', self._detach),
- ('/api/volumes/*/expand.xml', self._expand),
- ('/api/consistency_groups/*/snapshots.xml',
+ ops = {'POST': [('/api/users/login.json', self._login),
+ ('/api/volumes.json', self._create_volume),
+ ('/api/servers.json', self._create_server),
+ ('/api/servers/*/volumes.json', self._attach),
+ ('/api/volumes/*/detach.json', self._detach),
+ ('/api/volumes/*/expand.json', self._expand),
+ ('/api/volumes/*/rename.json', self._rename),
+ ('/api/consistency_groups/*/snapshots.json',
self._create_snapshot),
- ('/api/consistency_groups/*/clone.xml',
+ ('/api/snapshots/*/rename.json',
+ self._rename_snapshot),
+ ('/api/consistency_groups/*/clone.json',
self._create_clone)],
'DELETE': [('/api/volumes/*', self._delete),
('/api/snapshots/*', self._delete_snapshot)],
- 'GET': [('/api/volumes.xml', self._list_volumes),
- ('/api/pools.xml', self._list_pools),
- ('/api/vcontrollers.xml', self._list_controllers),
- ('/api/servers.xml', self._list_servers),
- ('/api/consistency_groups/*/snapshots.xml',
+ 'GET': [('/api/volumes.json?showonlyblock=YES',
+ self._list_volumes),
+ ('/api/volumes.json?display_name=*',
+ self._get_volume_by_name),
+ ('/api/pools/*.json', self._get_pool),
+ ('/api/vcontrollers.json', self._list_controllers),
+ ('/api/servers.json', self._list_servers),
+ ('/api/consistency_groups/*/snapshots.json',
self._list_vol_snapshots),
- ('/api/volumes/*/servers.xml',
+ ('/api/snapshots.json', self._list_snapshots),
+ ('/api/volumes/*/servers.json',
self._list_vol_attachments)]
}
@@ -142,8 +168,11 @@ class FakeResponse(object):
items = url.split('/')
titems = template_url.split('/')
for (i, titem) in enumerate(titems):
- if titem != '*' and titem != items[i]:
+ if '*' not in titem and titem != items[i]:
return False
+ if '?' in titem and titem.split('=')[0] != items[i].split('=')[0]:
+ return False
+
return True
@staticmethod
@@ -167,9 +196,11 @@ class FakeResponse(object):
def _create_volume(self):
params = self.body
params['display-name'] = params['name']
- params['cg-name'] = params['name']
+ params['cg_name'] = params['name']
params['snapshots'] = []
- params['attachments'] = []
+ params['server_ext_names'] = ''
+ params['pool'] = 'pool-0001'
+ params['provider_location'] = params['name']
vpsa_vol = 'volume-%07d' % self._get_counter()
RUNTIME_VARS['volumes'].append((vpsa_vol, params))
return RUNTIME_VARS['good']
@@ -192,13 +223,16 @@ class FakeResponse(object):
vol = params['volume_name[]']
for (vol_name, params) in RUNTIME_VARS['volumes']:
- if vol_name == vol:
- attachments = params['attachments']
+ if params['name'] == vol:
+ attachments = params['server_ext_names'].split(',')
if srv in attachments:
# already attached - ok
return RUNTIME_VARS['good']
else:
- attachments.append(srv)
+ if not attachments[0]:
+ params['server_ext_names'] = srv
+ else:
+ params['server_ext_names'] += ',' + srv
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
@@ -210,12 +244,14 @@ class FakeResponse(object):
srv = params['server_name[]']
for (vol_name, params) in RUNTIME_VARS['volumes']:
- if vol_name == vol:
- attachments = params['attachments']
+ if params['name'] == vol:
+ attachments = params['server_ext_names'].split(',')
if srv not in attachments:
return RUNTIME_VARS['bad_server']
else:
attachments.remove(srv)
+ params['server_ext_names'] = (','.join([str(elem)
+ for elem in attachments]))
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
@@ -227,26 +263,55 @@ class FakeResponse(object):
capacity = params['capacity']
for (vol_name, params) in RUNTIME_VARS['volumes']:
- if vol_name == vol:
+ if params['name'] == vol:
params['capacity'] = capacity
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
+ @check_access_key
+ def _rename(self):
+ params = self.body
+ vol = self.url.split('/')[3]
+
+ for (vol_name, vol_params) in RUNTIME_VARS['volumes']:
+ if vol_params['name'] == vol:
+ vol_params['name'] = params['new_name']
+ vol_params['display-name'] = params['new_name']
+ vol_params['cg_name'] = params['new_name']
+ return RUNTIME_VARS['good']
+
+ return RUNTIME_VARS['bad_volume']
+
+ @check_access_key
+ def _rename_snapshot(self):
+ params = self.body
+ vpsa_snapshot = self.url.split('/')[3]
+
+ for (vol_name, vol_params) in RUNTIME_VARS['volumes']:
+ for snapshot in vol_params['snapshots']:
+ if vpsa_snapshot == snapshot:
+ vol_params['snapshots'].remove(snapshot)
+ vol_params['snapshots'].append(params['newname'])
+ return RUNTIME_VARS['good']
+
+ return RUNTIME_VARS['bad_volume']
+
@check_access_key
def _create_snapshot(self):
params = self.body
cg_name = self.url.split('/')[3]
snap_name = params['display_name']
- for (vol_name, params) in RUNTIME_VARS['volumes']:
- if params['cg-name'] == cg_name:
- snapshots = params['snapshots']
+ for (vol_name, vol_params) in RUNTIME_VARS['volumes']:
+ if vol_params['cg_name'] == cg_name:
+ snapshots = vol_params['snapshots']
if snap_name in snapshots:
# already attached
return RUNTIME_VARS['bad_volume']
else:
snapshots.append(snap_name)
+ vol_params['has_snapshots'] = 'YES'
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
@@ -266,10 +331,11 @@ class FakeResponse(object):
def _create_clone(self):
params = self.body
params['display-name'] = params['name']
- params['cg-name'] = params['name']
+ params['cg_name'] = params['name']
params['capacity'] = 1
params['snapshots'] = []
- params['attachments'] = []
+ params['server_ext_names'] = ''
+ params['pool'] = 'pool-0001'
vpsa_vol = 'volume-%07d' % self._get_counter()
RUNTIME_VARS['volumes'].append((vpsa_vol, params))
return RUNTIME_VARS['good']
@@ -278,8 +344,8 @@ class FakeResponse(object):
vol = self.url.split('/')[3].split('.')[0]
for (vol_name, params) in RUNTIME_VARS['volumes']:
- if vol_name == vol:
- if params['attachments']:
+ if params['name'] == vol:
+ if params['server_ext_names']:
# there are attachments - should be volume busy error
return RUNTIME_VARS['bad_volume']
else:
@@ -288,93 +354,281 @@ class FakeResponse(object):
return RUNTIME_VARS['bad_volume']
- def _generate_list_resp(self, header, footer, body, lst, vol):
- resp = header
+ def _generate_list_resp(self, null_body, body, lst, vol):
+ resp = ''
for (obj, params) in lst:
if vol:
resp += body % (obj,
params['display-name'],
- params['cg-name'],
- params['capacity'])
+ params['cg_name'],
+ params['capacity'],
+ params['pool'])
else:
resp += body % (obj, params['display-name'])
- resp += footer
- return resp
+ if resp:
+ return resp
+ else:
+ return null_body
def _list_volumes(self):
- header = """
- 0
- """
- footer = ""
- body = """
- %s
- %s
- %s
- Available
- %s
- 1
- r5
- write-through
- 2012-01-28...
- 2012-01-28...
- """
- return self._generate_list_resp(header,
- footer,
- body,
- RUNTIME_VARS['volumes'],
- True)
+ null_body = """
+ {
+ "response": {
+ "volumes": [
+ ],
+ "status": 0
+ }
+ }"""
+ body = """
+ {
+ "response": {
+ "volumes": %s,
+ "status": 0
+ }
+ }"""
+
+ volume_obj = """
+ {
+ "name": "%s",
+ "display_name": "%s",
+ "cg_name": "%s",
+ "status": "%s",
+ "virtual_capacity": %d,
+ "pool_name": "%s",
+ "allocated-capacity": 1,
+ "raid-group-name": "r5",
+ "cache": "write-through",
+ "created-at": "2021-01-22",
+ "modified-at": "2021-01-22",
+ "has_snapshots": "%s"
+ }
+ """
+ if len(RUNTIME_VARS['volumes']) == 0:
+ return null_body
+ resp = ''
+ volume_list = ''
+ count = 0
+ for (vol_name, params) in RUNTIME_VARS['volumes']:
+ vol_status = (params.get('status') if params.get('status')
+ else 'Available')
+ has_snapshots = 'YES' if params.get('has_snapshots') else 'NO'
+ volume_dict = volume_obj % (params['name'],
+ params['display-name'],
+ params['cg_name'],
+ vol_status,
+ params['capacity'],
+ params['pool'],
+ has_snapshots)
+ if count == 0:
+ volume_list += volume_dict
+ count += 1
+ elif count != len(RUNTIME_VARS['volumes']):
+ volume_list = volume_list + ',' + volume_dict
+ count += 1
+ if volume_list:
+ volume_list = '[' + volume_list + ']'
+ resp = body % volume_list
+ return resp
+
+ return RUNTIME_VARS['bad_volume']
+
+ def _get_volume_by_name(self):
+ volume_name = self.url.split('=')[1]
+ body = """
+ {
+ "response": {
+ "volumes": [
+ {
+ "name": "%s",
+ "display_name": "%s",
+ "cg_name": "%s",
+ "provider_location": "%s",
+ "status": "%s",
+ "virtual_capacity": %d,
+ "pool_name": "%s",
+ "allocated-capacity": 1,
+ "raid-group-name": "r5",
+ "cache": "write-through",
+ "created-at": "2021-01-22",
+ "modified-at": "2021-01-22",
+ "has_snapshots": "%s",
+ "server_ext_names": "%s"
+ }
+ ],
+ "status": 0
+ }
+ }"""
+ for (vol_name, params) in RUNTIME_VARS['volumes']:
+ if params['name'] == volume_name:
+ vol_status = (params.get('status') if params.get('status')
+ else 'Available')
+ has_snapshots = 'YES' if params.get('has_snapshots') else 'NO'
+ resp = body % (volume_name, params['display-name'],
+ params['cg_name'],
+ params['cg_name'],
+ vol_status,
+ params['capacity'],
+ params['pool'],
+ has_snapshots,
+ params['server_ext_names'])
+ return resp
+
+ return RUNTIME_VARS['fake_volume']
def _list_controllers(self):
- header = """
- 0
- """
- footer = ""
- body = """
- %s
- %s
- active
- iqn.2011-04.com.zadarastorage:vsa-xxx:1
- 1.1.1.1
- 1.1.1.1
- 0.0.09-05.1--77.7
- ok
- ok
- test_chap_user
- test_chap_secret
- """
- return self._generate_list_resp(header,
- footer,
+ null_body = """
+ {
+ "response": {
+ "vcontrollers": [
+ ],
+ "status": 0
+ }
+ }"""
+ body = """
+ {
+ "response": {
+ "vcontrollers": [
+ {
+ "name": "%s",
+ "display-name": "%s",
+ "state": "active",
+ "target":
+ "iqn.2011-04.zadarastorage:vsa-xxx:1",
+ "iscsi_ip": "1.1.1.1",
+ "iscsi_ipv6": "",
+ "mgmt-ip": "1.1.1.1",
+ "software-ver": "0.0.09-05.1--77.7",
+ "heartbeat1": "ok",
+ "heartbeat2": "ok",
+ "vpsa_chap_user": "test_chap_user",
+ "vpsa_chap_secret": "test_chap_secret"
+ }
+ ],
+ "status": 0
+ }
+ }"""
+ return self._generate_list_resp(null_body,
body,
RUNTIME_VARS['controllers'],
False)
- def _list_pools(self):
- header = """
- 0
-
- """
- footer = ""
- return header + footer
+ def _get_pool(self):
+ response = """
+ {
+ "response": {
+ "pool": {
+ "name": "pool-0001",
+ "capacity": 100,
+ "available_capacity": 99,
+ "provisioned_capacity": 1
+ },
+ "status": 0
+ }
+ }"""
+ return response
def _list_servers(self):
- header = """
- 0
- """
- footer = ""
- body = """
- %s
- %s
- %s
- Active
- 2012-01-28...
- 2012-01-28...
- """
+ null_body = """
+ {
+ "response": {
+ "servers": [
+ ],
+ "status": 0
+ }
+ }"""
+ body = """
+ {
+ "response": {
+ "servers": %s,
+ "status": 0
+ }
+ }"""
- resp = header
+ server_obj = """
+ {
+ "name": "%s",
+ "display_name": "%s",
+ "iqn": "%s",
+ "target":
+ "iqn.2011-04.zadarastorage:vsa-xxx:1",
+ "lun": 0
+ }
+ """
+
+ resp = ''
+ server_list = ''
+ count = 0
for (obj, params) in RUNTIME_VARS['servers']:
- resp += body % (obj, params['display-name'], params['iqn'])
- resp += footer
- return resp
+ server_dict = server_obj % (obj,
+ params['display_name'],
+ params['iqn'])
+ if count == 0:
+ server_list += server_dict
+ count += 1
+ elif count != len(RUNTIME_VARS['servers']):
+ server_list = server_list + ',' + server_dict
+ count += 1
+ server_list = '[' + server_list + ']'
+ resp = body % server_list
+ if resp:
+ return resp
+ else:
+ return null_body
+
+ def _list_snapshots(self):
+ null_body = """
+ {
+ "response": {
+ "snapshots": [
+ ],
+ "status": 0
+ }
+ }"""
+ body = """
+ {
+ "response": {
+ "snapshots": %s,
+ "status": 0
+ }
+ }"""
+
+ snapshot_obj = """
+ {
+ "name": "%s",
+ "display_name": "%s",
+ "volume_display_name": "%s",
+ "volume_capacity_mb": %d,
+ "volume_ext_name": "%s",
+ "cg_name": "%s",
+ "pool_name": "pool-0001"
+ }
+ """
+
+ resp = ''
+ snapshot_list = ''
+ count = 0
+ for (obj, params) in RUNTIME_VARS['volumes']:
+ snapshots = params['snapshots']
+ if len(snapshots) == 0:
+ continue
+ for snapshot in snapshots:
+ snapshot_dict = snapshot_obj % (snapshot, snapshot,
+ params['provider_location'],
+ params['capacity'] * 1024,
+ params['display-name'],
+ params['cg_name'])
+ if count == 0:
+ snapshot_list += snapshot_dict
+ count += 1
+ else:
+ snapshot_list = snapshot_list + ',' + snapshot_dict
+ count += 1
+ snapshot_list = '[' + snapshot_list + ']'
+ resp = body % snapshot_list
+ if resp:
+ return resp
+ else:
+ return null_body
def _get_server_obj(self, name):
for (srv_name, params) in RUNTIME_VARS['servers']:
@@ -384,28 +638,53 @@ class FakeResponse(object):
def _list_vol_attachments(self):
vol = self.url.split('/')[3]
- header = """
- 0
- """
- footer = ""
- body = """
- %s
- %s
- %s
- iqn.2011-04.com.zadarastorage:vsa-xxx:1
- 0
- """
+ null_body = """
+ {
+ "response": {
+ "servers": [
+ ],
+ "status": 0
+ }
+ }"""
+ body = """
+ {
+ "response": {
+ "servers": %s,
+ "status": 0
+ }
+ }"""
+ server_obj = """
+ {
+ "name": "%s",
+ "display_name": "%s",
+ "iqn": "%s",
+ "target":
+ "iqn.2011-04.zadarastorage:vsa-xxx:1",
+ "lun": 0
+ }
+ """
for (vol_name, params) in RUNTIME_VARS['volumes']:
- if vol_name == vol:
- attachments = params['attachments']
- resp = header
+ if params['name'] == vol:
+ attachments = params['server_ext_names'].split(',')
+ if not attachments[0]:
+ return null_body
+ resp = ''
+ server_list = ''
+ count = 0
for server in attachments:
srv_params = self._get_server_obj(server)
- resp += body % (server,
- srv_params['display-name'],
- srv_params['iqn'])
- resp += footer
+ server_dict = (server_obj % (server,
+ srv_params['display_name'],
+ srv_params['iqn']))
+ if count == 0:
+ server_list += server_dict
+ count += 1
+ elif count != len(attachments):
+ server_list = server_list + ',' + server_dict
+ count += 1
+ server_list = '[' + server_list + ']'
+ resp = body % server_list
return resp
return RUNTIME_VARS['bad_volume']
@@ -413,26 +692,51 @@ class FakeResponse(object):
def _list_vol_snapshots(self):
cg_name = self.url.split('/')[3]
- header = """
- 0
- """
- footer = ""
+ null_body = """
+ {
+ "response": {
+ "snapshots": [
+ ],
+ "status": 0
+ }
+ }"""
- body = """
- %s
- %s
- normal
- %s
- pool-00000001
- """
+ body = """
+ {
+ "response": {
+ "snapshots": %s,
+ "status": 0
+ }
+ }"""
+ snapshot_obj = """
+ {
+ "name": "%s",
+ "display_name": "%s",
+ "cg_name": "%s",
+ "pool_name": "pool-0001"
+ }
+ """
for (vol_name, params) in RUNTIME_VARS['volumes']:
- if params['cg-name'] == cg_name:
+ if params['cg_name'] == cg_name:
snapshots = params['snapshots']
- resp = header
- for snap in snapshots:
- resp += body % (snap, snap, cg_name)
- resp += footer
+ if len(snapshots) == 0:
+ return null_body
+ resp = ''
+ snapshot_list = ''
+ count = 0
+
+ for snapshot in snapshots:
+ snapshot_dict = snapshot_obj % (snapshot, snapshot,
+ cg_name)
+ if count == 0:
+ snapshot_list += snapshot_dict
+ count += 1
+ elif count != len(snapshots):
+ snapshot_list = snapshot_list + ',' + snapshot_dict
+ count += 1
+ snapshot_list = '[' + snapshot_list + ']'
+ resp = body % snapshot_list
return resp
return RUNTIME_VARS['bad_volume']
@@ -442,7 +746,11 @@ class FakeRequests(object):
"""A fake requests for zadara volume driver tests."""
def __init__(self, method, api_url, params=None, data=None,
headers=None, **kwargs):
- url = parse.urlparse(api_url).path
+ apiurl_items = parse.urlparse(api_url)
+ if apiurl_items.query:
+ url = apiurl_items.path + '?' + apiurl_items.query
+ else:
+ url = apiurl_items.path
res = FakeResponse(method, url, params, data, headers, **kwargs)
self.content = res.read()
self.status_code = res.status
@@ -464,7 +772,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
global RUNTIME_VARS
RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS)
self.configuration = mock.Mock(conf.Configuration(None))
- self.configuration.append_config_values(zadara.zadara_opts)
+ self.configuration.append_config_values(common.zadara_opts)
self.configuration.reserved_percentage = 10
self.configuration.zadara_use_iser = True
self.configuration.zadara_vpsa_host = '192.168.5.5'
@@ -477,7 +785,10 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.configuration.zadara_vol_name_template = 'OS_%s'
self.configuration.zadara_vpsa_use_ssl = False
self.configuration.zadara_ssl_cert_verify = False
+ self.configuration.driver_ssl_cert_path = '/path/to/cert'
self.configuration.zadara_default_snap_policy = False
+ self.configuration.zadara_gen3_vol_compress = False
+ self.configuration.zadara_gen3_vol_dedupe = False
self.driver = (zadara.ZadaraVPSAISCSIDriver(
configuration=self.configuration))
self.driver.do_setup(None)
@@ -485,32 +796,44 @@ class ZadaraVPSADriverTestCase(test.TestCase):
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_create_destroy(self):
"""Create/Delete volume."""
- volume = {'name': 'test_volume_01', 'size': 1}
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
self.driver.create_volume(volume)
self.driver.delete_volume(volume)
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_create_destroy_multiple(self):
"""Create/Delete multiple volumes."""
- self.driver.create_volume({'name': 'test_volume_01', 'size': 1})
- self.driver.create_volume({'name': 'test_volume_02', 'size': 2})
- self.driver.create_volume({'name': 'test_volume_03', 'size': 3})
- self.driver.delete_volume({'name': 'test_volume_02'})
- self.driver.delete_volume({'name': 'test_volume_03'})
- self.driver.delete_volume({'name': 'test_volume_01'})
- self.driver.delete_volume({'name': 'test_volume_04'})
+ vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 1}
+ vol2_args = {'display_name': 'test_volume_02', 'size': 2, 'id': 2}
+ vol3_args = {'display_name': 'test_volume_03', 'size': 3, 'id': 3}
+ vol4_args = {'display_name': 'test_volume_04', 'size': 4, 'id': 4}
+ volume1 = fake_volume.fake_volume_obj(None, **vol1_args)
+ volume2 = fake_volume.fake_volume_obj(None, **vol2_args)
+ volume3 = fake_volume.fake_volume_obj(None, **vol3_args)
+ volume4 = fake_volume.fake_volume_obj(None, **vol4_args)
+
+ self.driver.create_volume(volume1)
+ self.driver.create_volume(volume2)
+ self.driver.create_volume(volume3)
+ self.driver.delete_volume(volume1)
+ self.driver.delete_volume(volume2)
+ self.driver.delete_volume(volume3)
+ self.driver.delete_volume(volume4)
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_destroy_non_existent(self):
"""Delete non-existent volume."""
- volume = {'name': 'test_volume_02', 'size': 1}
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
self.driver.delete_volume(volume)
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_empty_apis(self):
"""Test empty func (for coverage only)."""
context = None
- volume = {'name': 'test_volume_01', 'size': 1}
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
self.driver.create_export(context, volume)
self.driver.ensure_export(context, volume)
self.driver.remove_export(context, volume)
@@ -522,17 +845,18 @@ class ZadaraVPSADriverTestCase(test.TestCase):
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_volume_attach_detach(self):
"""Test volume attachment and detach."""
- volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': '123'}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
connector = dict(initiator='test_iqn.1')
self.driver.create_volume(volume)
props = self.driver.initialize_connection(volume, connector)
self.assertEqual('iser', props['driver_volume_type'])
data = props['data']
self.assertEqual('1.1.1.1:3260', data['target_portal'])
- self.assertEqual('iqn.2011-04.com.zadarastorage:vsa-xxx:1',
+ self.assertEqual('iqn.2011-04.zadarastorage:vsa-xxx:1',
data['target_iqn'])
self.assertEqual(int('0'), data['target_lun'])
- self.assertEqual(123, data['volume_id'])
+ self.assertEqual(volume['id'], data['volume_id'])
self.assertEqual('CHAP', data['auth_method'])
self.assertEqual('test_chap_user', data['auth_username'])
self.assertEqual('test_chap_secret', data['auth_password'])
@@ -542,78 +866,55 @@ class ZadaraVPSADriverTestCase(test.TestCase):
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_wrong_attach_params(self):
"""Test different wrong attach scenarios."""
- volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
+ vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 101}
+ volume1 = fake_volume.fake_volume_obj(None, **vol1_args)
connector1 = dict(initiator='test_iqn.1')
- self.assertRaises(exception.VolumeNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.initialize_connection,
volume1, connector1)
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_wrong_detach_params(self):
"""Test different wrong detachment scenarios."""
- volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
+ vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 101}
+ volume1 = fake_volume.fake_volume_obj(None, **vol1_args)
# Volume is not created.
- self.assertRaises(exception.VolumeNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.terminate_connection,
volume1, None)
self.driver.create_volume(volume1)
connector1 = dict(initiator='test_iqn.1')
# Server is not found. Volume is found
- self.assertRaises(zadara.ZadaraServerNotFound,
+ self.assertRaises(zadara_exception.ZadaraServerNotFound,
self.driver.terminate_connection,
volume1, connector1)
- volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
- volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}
+ vol2_args = {'display_name': 'test_volume_02', 'size': 1, 'id': 102}
+ vol3_args = {'display_name': 'test_volume_03', 'size': 1, 'id': 103}
+ volume2 = fake_volume.fake_volume_obj(None, **vol2_args)
+ volume3 = fake_volume.fake_volume_obj(None, **vol3_args)
connector2 = dict(initiator='test_iqn.2')
connector3 = dict(initiator='test_iqn.3')
self.driver.create_volume(volume2)
self.driver.initialize_connection(volume1, connector1)
self.driver.initialize_connection(volume2, connector2)
# volume is found. Server not found
- self.assertRaises(zadara.ZadaraServerNotFound,
+ self.assertRaises(zadara_exception.ZadaraServerNotFound,
self.driver.terminate_connection,
volume1, connector3)
# Server is found. volume not found
- self.assertRaises(exception.VolumeNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.terminate_connection,
volume3, connector1)
# Server and volume exits but not attached
- self.assertRaises(exception.FailedCmdWithDump,
+ self.assertRaises(common.exception.FailedCmdWithDump,
self.driver.terminate_connection,
volume1, connector2)
self.driver.terminate_connection(volume1, connector1)
self.driver.terminate_connection(volume2, connector2)
- @mock.patch.object(requests.Session, 'request', FakeRequests)
- def test_wrong_login_reply(self):
- """Test wrong login reply."""
- self.configuration.zadara_access_key = None
-
- RUNTIME_VARS['login'] = """
- %s
- 0
- """
- self.assertRaises(exception.MalformedResponse,
- self.driver.do_setup, None)
-
- RUNTIME_VARS['login'] = """
-
-
- 2012-04-30...
- 1
- 2012-02-21...
- jsmith@example.com
- jsmith
-
- %s
- 0
- """
- self.assertRaises(exception.MalformedResponse,
- self.driver.do_setup, None)
-
@mock.patch.object(requests.Session, 'request')
def test_ssl_use(self, request):
"""Coverage test for SSL connection."""
@@ -621,7 +922,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.configuration.zadara_vpsa_use_ssl = True
self.configuration.driver_ssl_cert_path = '/path/to/cert'
- fake_request_ctrls = FakeRequests("GET", "/api/vcontrollers.xml")
+ fake_request_ctrls = FakeRequests("GET", "/api/vcontrollers.json")
raw_controllers = fake_request_ctrls.content
good_response = mock.MagicMock()
good_response.status_code = RUNTIME_VARS['status']
@@ -653,7 +954,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
request.side_effect = request_verify_access_key
# when access key is invalid, driver will raise
# ZadaraInvalidAccessKey exception
- self.assertRaises(zadara.ZadaraInvalidAccessKey,
+ self.assertRaises(zadara_exception.ZadaraCinderInvalidAccessKey,
self.driver.do_setup,
None)
@@ -662,14 +963,16 @@ class ZadaraVPSADriverTestCase(test.TestCase):
"""Coverage test for non-good HTTP response."""
RUNTIME_VARS['status'] = 400
- volume = {'name': 'test_volume_01', 'size': 1}
+ vol_args = {'display_name': 'test_volume_03', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
self.assertRaises(exception.BadHTTPResponseStatus,
self.driver.create_volume, volume)
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_terminate_connection_force_detach(self):
"""Test terminate connection for os-force_detach """
- volume = {'name': 'test_volume_01', 'size': 1, 'id': 101}
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 101}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
connector = dict(initiator='test_iqn.1')
self.driver.create_volume(volume)
@@ -678,7 +981,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
# connector is None - force detach - detach all mappings
self.driver.terminate_connection(volume, None)
- self.assertRaises(zadara.exception.FailedCmdWithDump,
+ self.assertRaises(common.exception.FailedCmdWithDump,
self.driver.terminate_connection,
volume, connector)
@@ -688,7 +991,8 @@ class ZadaraVPSADriverTestCase(test.TestCase):
def test_delete_without_detach(self):
"""Test volume deletion without detach."""
- volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
+ vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 101}
+ volume1 = fake_volume.fake_volume_obj(None, **vol1_args)
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
connector3 = dict(initiator='test_iqn.3')
@@ -701,36 +1005,44 @@ class ZadaraVPSADriverTestCase(test.TestCase):
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_no_active_ctrl(self):
- volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 123}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
connector = dict(initiator='test_iqn.1')
self.driver.create_volume(volume)
RUNTIME_VARS['controllers'] = []
- self.assertRaises(zadara.ZadaraVPSANoActiveController,
+ self.assertRaises(zadara_exception.ZadaraVPSANoActiveController,
self.driver.initialize_connection,
volume, connector)
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_create_destroy_snapshot(self):
"""Create/Delete snapshot test."""
- volume = {'name': 'test_volume_01', 'size': 1}
- snapshot = {'name': 'snap_01',
- 'volume_name': volume['name']}
-
- self.driver.create_volume(volume)
+ wrong_vol_args = {'display_name': 'wrong_vol_01', 'size': 1, 'id': 2}
+ wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args)
+ wrong_snap_args = {'display_name': 'snap_01', 'volume': wrong_volume}
+ wrong_snapshot = fake_snapshot.fake_snapshot_obj(None,
+ **wrong_snap_args)
self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot,
- {'name': snapshot['name'],
- 'volume_name': 'wrong_vol'})
+ wrong_snapshot)
+ # Create cinder volume and snapshot
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ snap_args = {'display_name': 'test_snap_01', 'id': 1, 'volume': volume}
+ snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args)
+ self.driver.create_volume(volume)
self.driver.create_snapshot(snapshot)
# Deleted should succeed for missing volume
- self.driver.delete_snapshot({'name': snapshot['name'],
- 'volume_name': 'wrong_vol'})
+ self.driver.delete_snapshot(wrong_snapshot)
+
# Deleted should succeed for missing snap
- self.driver.delete_snapshot({'name': 'wrong_snap',
- 'volume_name': volume['name']})
+ fake_snap_args = {'display_name': 'test_snap_02',
+ 'id': 2, 'volume': volume}
+ fake_snap = fake_snapshot.fake_snapshot_obj(None, **fake_snap_args)
+ self.driver.delete_snapshot(fake_snap)
self.driver.delete_snapshot(snapshot)
self.driver.delete_volume(volume)
@@ -738,12 +1050,14 @@ class ZadaraVPSADriverTestCase(test.TestCase):
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_expand_volume(self):
"""Expand volume test."""
- volume = {'name': 'test_volume_01', 'size': 10}
- volume2 = {'name': 'test_volume_02', 'size': 10}
+ vol_args = {'display_name': 'test_volume_01', 'id': 1, 'size': 10}
+ vol2_args = {'display_name': 'test_volume_02', 'id': 2, 'size': 10}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ volume2 = fake_volume.fake_volume_obj(None, **vol2_args)
self.driver.create_volume(volume)
- self.assertRaises(zadara.ZadaraVolumeNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.extend_volume,
volume2, 15)
self.assertRaises(exception.InvalidInput,
@@ -756,33 +1070,43 @@ class ZadaraVPSADriverTestCase(test.TestCase):
@mock.patch.object(requests.Session, 'request', FakeRequests)
def test_create_destroy_clones(self):
"""Create/Delete clones test."""
- volume1 = {'name': 'test_volume_01', 'id': '01', 'size': 1}
- volume2 = {'name': 'test_volume_02', 'id': '02', 'size': 2}
- volume3 = {'name': 'test_volume_03', 'id': '03', 'size': 1}
- snapshot = {'name': 'snap_01',
- 'id': '01',
- 'volume_name': volume1['name'],
- 'volume_size': 1}
+ vol1_args = {'display_name': 'test_volume_01', 'id': 1, 'size': 1}
+ vol2_args = {'display_name': 'test_volume_02', 'id': 2, 'size': 2}
+ vol3_args = {'display_name': 'test_volume_03', 'id': 3, 'size': 1}
+ volume1 = fake_volume.fake_volume_obj(None, **vol1_args)
+ volume2 = fake_volume.fake_volume_obj(None, **vol2_args)
+ volume3 = fake_volume.fake_volume_obj(None, **vol3_args)
+ snap_args = {'display_name': 'test_snap_01',
+ 'id': 1, 'volume': volume1}
+ snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args)
self.driver.create_volume(volume1)
self.driver.create_snapshot(snapshot)
# Test invalid vol reference
- self.assertRaises(exception.VolumeNotFound,
+ wrong_vol_args = {'display_name': 'wrong_volume_01',
+ 'id': 4, 'size': 1}
+ wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args)
+ wrong_snap_args = {'display_name': 'test_wrong_snap',
+ 'id': 2, 'volume': wrong_volume}
+ wrong_snapshot = fake_snapshot.fake_snapshot_obj(None,
+ **wrong_snap_args)
+ self.assertRaises(exception.SnapshotNotFound,
self.driver.create_volume_from_snapshot,
- volume2,
- {'name': snapshot['name'],
- 'id': snapshot['id'],
- 'volume_name': 'wrong_vol'})
+ wrong_volume,
+ wrong_snapshot)
+
+ wrong_snap_args = {'display_name': 'test_wrong_snap',
+ 'id': 4, 'volume': volume1}
+ wrong_snapshot = fake_snapshot.fake_snapshot_obj(None,
+ **wrong_snap_args)
# Test invalid snap reference
self.assertRaises(exception.SnapshotNotFound,
self.driver.create_volume_from_snapshot,
- volume2,
- {'name': 'wrong_snap',
- 'id': 'wrong_id',
- 'volume_name': snapshot['volume_name']})
+ volume1,
+ wrong_snapshot)
# Test invalid src_vref for volume clone
- self.assertRaises(exception.VolumeNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.create_cloned_volume,
volume3, volume2)
self.driver.create_volume_from_snapshot(volume2, snapshot)
@@ -798,10 +1122,11 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.configuration.safe_get.return_value = 'ZadaraVPSAISCSIDriver'
data = self.driver.get_volume_stats(True)
self.assertEqual('Zadara Storage', data['vendor_name'])
- self.assertEqual('unknown', data['total_capacity_gb'])
- self.assertEqual('unknown', data['free_capacity_gb'])
- self.assertEqual({'total_capacity_gb': 'unknown',
- 'free_capacity_gb': 'unknown',
+ self.assertEqual(100, data['total_capacity_gb'])
+ self.assertEqual(99, data['free_capacity_gb'])
+ self.assertEqual({'total_capacity_gb': 100,
+ 'free_capacity_gb': 99,
+ 'multiattach': True,
'reserved_percentage':
self.configuration.reserved_percentage,
'QoS_support': False,
@@ -810,3 +1135,252 @@ class ZadaraVPSADriverTestCase(test.TestCase):
'storage_protocol': 'iSER',
'volume_backend_name': 'ZadaraVPSAISCSIDriver'},
data)
+
+ def create_vpsa_backend_volume(self, vol_id, vol_name, vol_size,
+ vol_status, has_snapshots):
+ vol_params = {}
+ vol_params['id'] = vol_id
+ vol_params['name'] = vol_name
+ vol_params['display-name'] = vol_name
+ vol_params['cg_name'] = vol_name
+ vol_params['provider_location'] = vol_name
+ vol_params['status'] = vol_status
+ vol_params['capacity'] = vol_size
+ vol_params['pool'] = 'pool-0001'
+ vol_params['has_snapshots'] = has_snapshots
+ vol_params['server_ext_names'] = ''
+ vol_params['snapshots'] = []
+ volname = 'fake-volume'
+ vpsa_volume = (volname, vol_params)
+ RUNTIME_VARS['volumes'].append(vpsa_volume)
+ return vpsa_volume
+
+ @mock.patch.object(requests.Session, 'request', FakeRequests)
+ def test_manage_existing_volume(self):
+ vol_args = {'id': 'manage-name',
+ 'display_name': 'manage-name',
+ 'size': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ vpsa_volume = self.create_vpsa_backend_volume('fake_id',
+ 'fake_name', 1,
+ 'Available', 'NO')
+ # Check the failure with an empty reference for volume
+ identifier = {}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing,
+ volume, identifier)
+
+ # Check the failure with an invalid reference for volume
+ identifier['name'] = 'fake_identifier'
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing,
+ volume, identifier)
+
+ identifier['name'] = 'fake_name'
+ self.driver.manage_existing(volume, identifier)
+ # Check the new volume renamed accordingly
+ self.assertEqual(vpsa_volume[1]['display-name'],
+ 'OS_%s' % volume['name'])
+ self.driver.delete_volume(volume)
+
+ @mock.patch.object(requests.Session, 'request', FakeRequests)
+ def test_manage_existing_snapshot(self):
+ vol_args = {'display_name': 'fake_name', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ self.driver.create_volume(volume)
+
+ # Create a backend snapshot that will be managed by cinder volume
+ (vol_name, vol_params) = RUNTIME_VARS['volumes'][0]
+ vol_params['snapshots'].append('fakesnapname')
+
+ # Check the failure with wrong volume for snapshot
+ wrong_vol_args = {'display_name': 'wrong_volume_01',
+ 'size': 1, 'id': 2}
+ wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args)
+ wrong_snap_args = {'display_name': 'snap_01', 'volume': wrong_volume}
+ wrong_snapshot = fake_snapshot.fake_snapshot_obj(None,
+ **wrong_snap_args)
+ identifier = {}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot,
+ wrong_snapshot, identifier)
+
+ identifier['name'] = 'fake_identifier'
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot,
+ wrong_snapshot, identifier)
+
+ # Check the failure with wrong identifier for the snapshot
+ snap_args = {'display_name': 'manage_snapname',
+ 'id': 'manage_snapname', 'volume': volume}
+ snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args)
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot,
+ snapshot, identifier)
+
+ identifier['name'] = 'fakesnapname'
+ self.driver.manage_existing_snapshot(snapshot, identifier)
+ # Check that the backend snapshot has been renamed
+ (vol_name, vol_params) = RUNTIME_VARS['volumes'][0]
+ self.assertEqual(vol_params['snapshots'][0], snapshot['name'])
+ self.driver.delete_snapshot(snapshot)
+ self.driver.delete_volume(volume)
+
+ @mock.patch.object(requests.Session, 'request', FakeRequests)
+ def test_get_manageable_volumes(self):
+ vpsa_volume1 = self.create_vpsa_backend_volume('manage_vol_id1',
+ 'manage_vol1', 1,
+ 'Available', 'NO')
+ vpsa_volume2 = self.create_vpsa_backend_volume('manage_vol_id2',
+ 'manage_vol2', 2,
+ 'Available', 'NO')
+
+ cinder_vol1_args = {'display_name': 'fake-volume1',
+ 'size': 3, 'id': 'fake-volume1'}
+ cinder_vol2_args = {'display_name': 'fake-volume2',
+ 'size': 4, 'id': 'fake-volume2'}
+ cinder_vol1 = fake_volume.fake_volume_obj(None, **cinder_vol1_args)
+ cinder_vol2 = fake_volume.fake_volume_obj(None, **cinder_vol2_args)
+ self.driver.create_volume(cinder_vol1)
+ self.driver.create_volume(cinder_vol2)
+
+ cinder_vols = [cinder_vol1, cinder_vol2]
+ manageable_vols = (self.driver.get_manageable_volumes(
+ cinder_vols, None, 10, 0, ['size'], ['asc']))
+ # Check the volumes are returned in the sorted order
+ self.assertEqual(len(manageable_vols), 4)
+ self.assertGreater(manageable_vols[1]['size'],
+ manageable_vols[0]['size'])
+ self.assertGreater(manageable_vols[3]['size'],
+ manageable_vols[2]['size'])
+ self.driver.delete_volume(cinder_vol1)
+ self.driver.delete_volume(cinder_vol2)
+
+ # Try to manage the volume and delete it
+ vol1_args = {'display_name': 'manage-name1',
+ 'size': 1, 'id': 'manage-name1'}
+ volume1 = fake_volume.fake_volume_obj(None, **vol1_args)
+ identifier = {'name': 'manage_vol1'}
+ self.driver.manage_existing(volume1, identifier)
+ self.assertEqual(vpsa_volume1[1]['display-name'],
+ 'OS_%s' % volume1['name'])
+ self.driver.delete_volume(volume1)
+
+ # Manage and delete the volume
+ vol2_args = {'display_name': 'manage-name2',
+ 'size': 1, 'id': 'manage-name2'}
+ volume2 = fake_volume.fake_volume_obj(None, **vol2_args)
+ identifier = {'name': 'manage_vol2'}
+ self.driver.manage_existing(volume2, identifier)
+ self.assertEqual(vpsa_volume2[1]['display-name'],
+ 'OS_%s' % volume2['name'])
+ self.driver.delete_volume(volume2)
+
+ @mock.patch.object(requests.Session, 'request', FakeRequests)
+ def test_get_manageable_snapshots(self):
+ # Create a cinder volume and a snapshot
+ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ snap_args = {'display_name': 'test_snap_01',
+ 'id': 1, 'volume': volume}
+ snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args)
+ self.driver.create_volume(volume)
+ self.driver.create_snapshot(snapshot)
+
+ # Create backend snapshots for the volume
+ vpsa_volume = self.create_vpsa_backend_volume('manage_vol_id',
+ 'manage_vol', 1,
+ 'Available', 'YES')
+ snapshot1 = {'name': 'manage_snap_01',
+ 'volume_name': vpsa_volume[1]['name'],
+ 'provider_location': 'manage_snap_01'}
+ snapshot2 = {'name': 'manage_snap_02',
+ 'volume_name': vpsa_volume[1]['name'],
+ 'provider_location': 'manage_snap_02'}
+ vpsa_volume[1]['snapshots'].append(snapshot1['name'])
+ vpsa_volume[1]['snapshots'].append(snapshot2['name'])
+
+ cinder_snapshots = [snapshot]
+ manageable_snapshots = (self.driver.get_manageable_snapshots(
+ cinder_snapshots, None, 10, 0, ['reference'], ['asc']))
+ # Check the returned manageable snapshot names
+ self.assertEqual(snapshot1['name'],
+ manageable_snapshots[0]['reference']['name'])
+ self.assertEqual(snapshot2['name'],
+ manageable_snapshots[1]['reference']['name'])
+
+ # Verify the safety of the snapshots to manage
+ self.assertEqual(manageable_snapshots[0]['safe_to_manage'], True)
+ self.assertEqual(manageable_snapshots[1]['safe_to_manage'], True)
+
+ # Verify the reference of the source volume of the snapshots
+ source_vol = manageable_snapshots[0]['source_reference']
+ self.assertEqual(vpsa_volume[1]['name'], source_vol['name'])
+ source_vol = manageable_snapshots[1]['source_reference']
+ self.assertEqual(vpsa_volume[1]['name'], source_vol['name'])
+ self.driver.delete_volume(volume)
+
+ @mock.patch.object(requests.Session, 'request', FakeRequests)
+ def test_manage_existing_volume_get_size(self):
+ vol_args = {'display_name': 'fake_name', 'id': 1, 'size': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ self.driver.create_volume(volume)
+
+ # Check the failure with empty reference of the volume
+ identifier = {}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_get_size,
+ volume, identifier)
+
+ # Check the failure with invalid volume reference
+ identifier = {'name': 'fake_identifiter'}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_get_size,
+ volume, identifier)
+
+ # Verify the volume size
+ identifier = {'name': 'OS_volume-%s' % volume['id']}
+ vol_size = self.driver.manage_existing_get_size(volume, identifier)
+ self.assertEqual(vol_size, volume.size)
+ self.driver.delete_volume(volume)
+
+ @mock.patch.object(requests.Session, 'request', FakeRequests)
+ def test_manage_existing_snapshot_get_size(self):
+ # Create a cinder volume and a snapshot
+ vol_args = {'display_name': 'fake_name', 'id': 1, 'size': 1}
+ volume = fake_volume.fake_volume_obj(None, **vol_args)
+ self.driver.create_volume(volume)
+ snap_args = {'display_name': 'fake_snap',
+ 'id': 1, 'volume': volume}
+ snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args)
+ self.driver.create_snapshot(snapshot)
+
+ # Check with the wrong volume of the snapshot
+ wrong_vol_args = {'display_name': 'wrong_volume_01',
+ 'size': 1, 'id': 2}
+ wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args)
+ wrong_snap_args = {'display_name': 'wrong_snap',
+ 'volume': wrong_volume}
+ wrong_snapshot = fake_snapshot.fake_snapshot_obj(None,
+ **wrong_snap_args)
+ identifier = {}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot_get_size,
+ wrong_snapshot, identifier)
+
+ identifier = {'name': 'fake_identifiter'}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot_get_size,
+ wrong_snapshot, identifier)
+
+ # Check with the invalid reference of the snapshot
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot_get_size,
+ snapshot, identifier)
+
+ # Verify the snapshot size same as the volume
+ identifier = {'name': 'snapshot-%s' % snapshot['id']}
+ snap_size = (self.driver.manage_existing_snapshot_get_size(
+ snapshot, identifier))
+ self.assertEqual(snap_size, volume['size'])
+ self.driver.delete_volume(volume)
diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py
deleted file mode 100644
index 4032759a38f..00000000000
--- a/cinder/volume/drivers/zadara.py
+++ /dev/null
@@ -1,753 +0,0 @@
-# Copyright (c) 2019 Zadara Storage, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Volume driver for Zadara Virtual Private Storage Array (VPSA).
-
-This driver requires VPSA with API version 15.07 or higher.
-"""
-
-from lxml import etree
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import strutils
-import requests
-import six
-
-from cinder import exception
-from cinder.i18n import _
-from cinder import interface
-from cinder.volume import configuration
-from cinder.volume import driver
-
-LOG = logging.getLogger(__name__)
-
-zadara_opts = [
- cfg.BoolOpt('zadara_use_iser',
- default=True,
- help='VPSA - Use ISER instead of iSCSI'),
- cfg.StrOpt('zadara_vpsa_host',
- default=None,
- help='VPSA - Management Host name or IP address'),
- cfg.PortOpt('zadara_vpsa_port',
- default=None,
- help='VPSA - Port number'),
- cfg.BoolOpt('zadara_vpsa_use_ssl',
- default=False,
- help='VPSA - Use SSL connection'),
- cfg.BoolOpt('zadara_ssl_cert_verify',
- default=True,
- help='If set to True the http client will validate the SSL '
- 'certificate of the VPSA endpoint.'),
- cfg.StrOpt('zadara_user',
- default=None,
- deprecated_for_removal=True,
- help='VPSA - Username'),
- cfg.StrOpt('zadara_password',
- default=None,
- help='VPSA - Password',
- deprecated_for_removal=True,
- secret=True),
- cfg.StrOpt('zadara_access_key',
- default=None,
- help='VPSA access key',
- secret=True),
- cfg.StrOpt('zadara_vpsa_poolname',
- default=None,
- help='VPSA - Storage Pool assigned for volumes'),
- cfg.BoolOpt('zadara_vol_encrypt',
- default=False,
- help='VPSA - Default encryption policy for volumes'),
- cfg.StrOpt('zadara_vol_name_template',
- default='OS_%s',
- help='VPSA - Default template for VPSA volume names'),
- cfg.BoolOpt('zadara_default_snap_policy',
- default=False,
- help="VPSA - Attach snapshot policy for volumes")]
-CONF = cfg.CONF
-CONF.register_opts(zadara_opts, group=configuration.SHARED_CONF_GROUP)
-
-
-class ZadaraServerCreateFailure(exception.VolumeDriverException):
- message = _("Unable to create server object for initiator %(name)s")
-
-
-class ZadaraServerNotFound(exception.NotFound):
- message = _("Unable to find server object for initiator %(name)s")
-
-
-class ZadaraVPSANoActiveController(exception.VolumeDriverException):
- message = _("Unable to find any active VPSA controller")
-
-
-class ZadaraAttachmentsNotFound(exception.NotFound):
- message = _("Failed to retrieve attachments for volume %(name)s")
-
-
-class ZadaraInvalidAttachmentInfo(exception.Invalid):
- message = _("Invalid attachment info for volume %(name)s: %(reason)s")
-
-
-class ZadaraVolumeNotFound(exception.VolumeDriverException):
- message = "%(reason)s"
-
-
-class ZadaraInvalidAccessKey(exception.VolumeDriverException):
- message = "Invalid VPSA access key"
-
-
-class ZadaraVPSAConnection(object):
- """Executes volume driver commands on VPSA."""
-
- def __init__(self, conf):
- self.conf = conf
- self.access_key = conf.zadara_access_key
-
- self.ensure_connection()
-
- def _generate_vpsa_cmd(self, cmd, **kwargs):
- """Generate command to be sent to VPSA."""
-
- # Dictionary of applicable VPSA commands in the following format:
- # 'command': (method, API_URL, {optional parameters})
- vpsa_commands = {
- 'login': ('POST',
- '/api/users/login.xml',
- {'user': self.conf.zadara_user,
- 'password': self.conf.zadara_password}),
- # Volume operations
- 'create_volume': ('POST',
- '/api/volumes.xml',
- {'name': kwargs.get('name'),
- 'capacity': kwargs.get('size'),
- 'pool': self.conf.zadara_vpsa_poolname,
- 'thin': 'YES',
- 'crypt': 'YES'
- if self.conf.zadara_vol_encrypt else 'NO',
- 'attachpolicies': 'NO'
- if not self.conf.zadara_default_snap_policy
- else 'YES'}),
- 'delete_volume': ('DELETE',
- '/api/volumes/%s.xml' % kwargs.get('vpsa_vol'),
- {'force': 'YES'}),
- 'expand_volume': ('POST',
- '/api/volumes/%s/expand.xml'
- % kwargs.get('vpsa_vol'),
- {'capacity': kwargs.get('size')}),
- # Snapshot operations
- # Snapshot request is triggered for a single volume though the
- # API call implies that snapshot is triggered for CG (legacy API).
- 'create_snapshot': ('POST',
- '/api/consistency_groups/%s/snapshots.xml'
- % kwargs.get('cg_name'),
- {'display_name': kwargs.get('snap_name')}),
- 'delete_snapshot': ('DELETE',
- '/api/snapshots/%s.xml'
- % kwargs.get('snap_id'),
- {}),
- 'create_clone_from_snap': ('POST',
- '/api/consistency_groups/%s/clone.xml'
- % kwargs.get('cg_name'),
- {'name': kwargs.get('name'),
- 'snapshot': kwargs.get('snap_id')}),
- 'create_clone': ('POST',
- '/api/consistency_groups/%s/clone.xml'
- % kwargs.get('cg_name'),
- {'name': kwargs.get('name')}),
- # Server operations
- 'create_server': ('POST',
- '/api/servers.xml',
- {'display_name': kwargs.get('initiator'),
- 'iqn': kwargs.get('initiator')}),
- # Attach/Detach operations
- 'attach_volume': ('POST',
- '/api/servers/%s/volumes.xml'
- % kwargs.get('vpsa_srv'),
- {'volume_name[]': kwargs.get('vpsa_vol'),
- 'force': 'NO'}),
- 'detach_volume': ('POST',
- '/api/volumes/%s/detach.xml'
- % kwargs.get('vpsa_vol'),
- {'server_name[]': kwargs.get('vpsa_srv'),
- 'force': 'YES'}),
- # Get operations
- 'list_volumes': ('GET',
- '/api/volumes.xml',
- {}),
- 'list_pools': ('GET',
- '/api/pools.xml',
- {}),
- 'list_controllers': ('GET',
- '/api/vcontrollers.xml',
- {}),
- 'list_servers': ('GET',
- '/api/servers.xml',
- {}),
- 'list_vol_attachments': ('GET',
- '/api/volumes/%s/servers.xml'
- % kwargs.get('vpsa_vol'),
- {}),
- 'list_vol_snapshots': ('GET',
- '/api/consistency_groups/%s/snapshots.xml'
- % kwargs.get('cg_name'),
- {})}
-
- try:
- method, url, params = vpsa_commands[cmd]
- except KeyError:
- raise exception.UnknownCmd(cmd=cmd)
-
- if method == 'GET':
- params = dict(page=1, start=0, limit=0)
- body = None
-
- elif method in ['DELETE', 'POST']:
- body = params
- params = None
-
- else:
- msg = (_('Method %(method)s is not defined') %
- {'method': method})
- LOG.error(msg)
- raise AssertionError(msg)
-
- # 'access_key' was generated using username and password
- # or it was taken from the input file
- headers = {'X-Access-Key': self.access_key}
-
- return method, url, params, body, headers
-
- def ensure_connection(self, cmd=None):
- """Retrieve access key for VPSA connection."""
-
- if self.access_key or cmd == 'login':
- return
-
- cmd = 'login'
- xml_tree = self.send_cmd(cmd)
- user = xml_tree.find('user')
- if user is None:
- raise (exception.MalformedResponse(
- cmd=cmd, reason=_('no "user" field')))
- access_key = user.findtext('access-key')
- if access_key is None:
- raise (exception.MalformedResponse(
- cmd=cmd, reason=_('no "access-key" field')))
- self.access_key = access_key
-
- def send_cmd(self, cmd, **kwargs):
- """Send command to VPSA Controller."""
-
- self.ensure_connection(cmd)
-
- method, url, params, body, headers = self._generate_vpsa_cmd(cmd,
- **kwargs)
- LOG.debug('Invoking %(cmd)s using %(method)s request.',
- {'cmd': cmd, 'method': method})
-
- host = self.conf.zadara_vpsa_host
- port = int(self.conf.zadara_vpsa_port)
-
- protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http"
- if protocol == "https":
- if not self.conf.zadara_ssl_cert_verify:
- verify = False
- else:
- cert = ((self.conf.driver_ssl_cert_path) or None)
- verify = cert if cert else True
- else:
- verify = False
-
- if port:
- api_url = "%s://%s:%d%s" % (protocol, host, port, url)
- else:
- api_url = "%s://%s%s" % (protocol, host, url)
-
- try:
- with requests.Session() as session:
- session.headers.update(headers)
- response = session.request(method, api_url, params=params,
- data=body, headers=headers,
- verify=verify)
- except requests.exceptions.RequestException as e:
- message = (_('Exception: %s') % six.text_type(e))
- raise exception.VolumeDriverException(message=message)
-
- if response.status_code != 200:
- raise exception.BadHTTPResponseStatus(status=response.status_code)
-
- data = response.content
- xml_tree = etree.fromstring(data)
- status = xml_tree.findtext('status')
- if status == '5':
- # Invalid Credentials
- raise ZadaraInvalidAccessKey()
-
- if status != '0':
- raise exception.FailedCmdWithDump(status=status, data=data)
-
- if method in ['POST', 'DELETE']:
- LOG.debug('Operation completed with status code %(status)s',
- {'status': status})
- return xml_tree
-
-
-@interface.volumedriver
-class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
- """Zadara VPSA iSCSI/iSER volume driver.
-
- .. code-block:: none
-
- Version history:
- 15.07 - Initial driver
- 16.05 - Move from httplib to requests
- 19.08 - Add API access key authentication option
- """
-
- VERSION = '19.08'
-
- # ThirdPartySystems wiki page
- CI_WIKI_NAME = "ZadaraStorage_VPSA_CI"
-
- def __init__(self, *args, **kwargs):
- super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs)
- self.vpsa = None
- self.configuration.append_config_values(zadara_opts)
-
- @staticmethod
- def get_driver_options():
- return zadara_opts
-
- def do_setup(self, context):
- """Any initialization the volume driver does while starting.
-
- Establishes initial connection with VPSA and retrieves access_key.
- """
- self.vpsa = ZadaraVPSAConnection(self.configuration)
- self._check_access_key_validity()
-
- def _check_access_key_validity(self):
- """Check VPSA access key"""
- self.vpsa.ensure_connection()
- if not self.vpsa.access_key:
- raise ZadaraInvalidAccessKey()
- active_ctrl = self._get_active_controller_details()
- if active_ctrl is None:
- raise ZadaraInvalidAccessKey()
-
- def check_for_setup_error(self):
- """Returns an error (exception) if prerequisites aren't met."""
- self._check_access_key_validity()
-
- def local_path(self, volume):
- """Return local path to existing local volume."""
- raise NotImplementedError()
-
- def _xml_parse_helper(self, xml_tree, first_level, search_tuple,
- first=True):
- """Helper for parsing VPSA's XML output.
-
- Returns single item if first==True or list for multiple selection.
- If second argument in search_tuple is None - returns all items with
- appropriate key.
- """
-
- objects = xml_tree.find(first_level)
- if objects is None:
- return None
-
- result_list = []
- key, value = search_tuple
- for child_object in list(objects):
- found_value = child_object.findtext(key)
- if found_value and (found_value == value or value is None):
- if first:
- return child_object
- else:
- result_list.append(child_object)
- return result_list if result_list else None
-
- def _get_vpsa_volume_name_and_size(self, name):
- """Return VPSA's name & size for the volume."""
- xml_tree = self.vpsa.send_cmd('list_volumes')
- volume = self._xml_parse_helper(xml_tree, 'volumes',
- ('display-name', name))
- if volume is not None:
- return (volume.findtext('name'),
- int(volume.findtext('virtual-capacity')))
-
- return None, None
-
- def _get_vpsa_volume_name(self, name):
- """Return VPSA's name for the volume."""
- (vol_name, size) = self._get_vpsa_volume_name_and_size(name)
- return vol_name
-
- def _get_volume_cg_name(self, name):
- """Return name of the consistency group for the volume.
-
- cg-name is a volume uniqe identifier (legacy attribute)
- and not consistency group as it may imply.
- """
- xml_tree = self.vpsa.send_cmd('list_volumes')
- volume = self._xml_parse_helper(xml_tree, 'volumes',
- ('display-name', name))
- if volume is not None:
- return volume.findtext('cg-name')
-
- return None
-
- def _get_snap_id(self, cg_name, snap_name):
- """Return snapshot ID for particular volume."""
- xml_tree = self.vpsa.send_cmd('list_vol_snapshots',
- cg_name=cg_name)
- snap = self._xml_parse_helper(xml_tree, 'snapshots',
- ('display-name', snap_name))
- if snap is not None:
- return snap.findtext('name')
-
- return None
-
- def _get_pool_capacity(self, pool_name):
- """Return pool's total and available capacities."""
- xml_tree = self.vpsa.send_cmd('list_pools')
- pool = self._xml_parse_helper(xml_tree, 'pools',
- ('name', pool_name))
- if pool is not None:
- total = int(pool.findtext('capacity'))
- free = int(float(pool.findtext('available-capacity')))
- LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free',
- {'name': pool_name, 'total': total, 'free': free})
- return total, free
-
- return 'unknown', 'unknown'
-
- def _get_active_controller_details(self):
- """Return details of VPSA's active controller."""
- xml_tree = self.vpsa.send_cmd('list_controllers')
- ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers',
- ('state', 'active'))
- if ctrl is not None:
- return dict(target=ctrl.findtext('target'),
- ip=ctrl.findtext('iscsi-ip'),
- chap_user=ctrl.findtext('vpsa-chap-user'),
- chap_passwd=ctrl.findtext('vpsa-chap-secret'))
- return None
-
- def _detach_vpsa_volume(self, vpsa_vol, vpsa_srv=None):
- """Detach volume from all attached servers."""
- if vpsa_srv:
- list_servers_ids = [vpsa_srv]
- else:
- list_servers = self._get_servers_attached_to_volume(vpsa_vol)
- list_servers_ids = [s.findtext('name') for s in list_servers]
-
- for server_id in list_servers_ids:
- # Detach volume from server
- self.vpsa.send_cmd('detach_volume',
- vpsa_srv=server_id,
- vpsa_vol=vpsa_vol)
-
- def _get_server_name(self, initiator):
- """Return VPSA's name for server object with given IQN."""
- xml_tree = self.vpsa.send_cmd('list_servers')
- server = self._xml_parse_helper(xml_tree, 'servers',
- ('iqn', initiator))
- if server is not None:
- return server.findtext('name')
- return None
-
- def _create_vpsa_server(self, initiator):
- """Create server object within VPSA (if doesn't exist)."""
- vpsa_srv = self._get_server_name(initiator)
- if not vpsa_srv:
- xml_tree = self.vpsa.send_cmd('create_server', initiator=initiator)
- vpsa_srv = xml_tree.findtext('server-name')
- return vpsa_srv
-
- def create_volume(self, volume):
- """Create volume."""
- self.vpsa.send_cmd(
- 'create_volume',
- name=self.configuration.zadara_vol_name_template % volume['name'],
- size=volume['size'])
-
- def delete_volume(self, volume):
- """Delete volume.
-
- Return ok if doesn't exist. Auto detach from all servers.
- """
- # Get volume name
- name = self.configuration.zadara_vol_name_template % volume['name']
- vpsa_vol = self._get_vpsa_volume_name(name)
- if not vpsa_vol:
- LOG.warning('Volume %s could not be found. '
- 'It might be already deleted', name)
- return
-
- self._detach_vpsa_volume(vpsa_vol=vpsa_vol)
-
- # Delete volume
- self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
-
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
-
- LOG.debug('Create snapshot: %s', snapshot['name'])
-
- # Retrieve the CG name for the base volume
- volume_name = (self.configuration.zadara_vol_name_template
- % snapshot['volume_name'])
- cg_name = self._get_volume_cg_name(volume_name)
- if not cg_name:
- msg = _('Volume %(name)s not found') % {'name': volume_name}
- LOG.error(msg)
- raise exception.VolumeDriverException(message=msg)
-
- self.vpsa.send_cmd('create_snapshot',
- cg_name=cg_name,
- snap_name=snapshot['name'])
-
- def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
-
- LOG.debug('Delete snapshot: %s', snapshot['name'])
-
- # Retrieve the CG name for the base volume
- volume_name = (self.configuration.zadara_vol_name_template
- % snapshot['volume_name'])
- cg_name = self._get_volume_cg_name(volume_name)
- if not cg_name:
- # If the volume isn't present, then don't attempt to delete
- LOG.warning('snapshot: original volume %s not found, '
- 'skipping delete operation',
- volume_name)
- return
-
- snap_id = self._get_snap_id(cg_name, snapshot['name'])
- if not snap_id:
- # If the snapshot isn't present, then don't attempt to delete
- LOG.warning('snapshot: snapshot %s not found, '
- 'skipping delete operation', snapshot['name'])
- return
-
- self.vpsa.send_cmd('delete_snapshot',
- snap_id=snap_id)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
-
- LOG.debug('Creating volume from snapshot: %s', snapshot['name'])
-
- # Retrieve the CG name for the base volume
- volume_name = (self.configuration.zadara_vol_name_template
- % snapshot['volume_name'])
- cg_name = self._get_volume_cg_name(volume_name)
- if not cg_name:
- LOG.error('Volume %(name)s not found', {'name': volume_name})
- raise exception.VolumeNotFound(volume_id=volume['id'])
-
- snap_id = self._get_snap_id(cg_name, snapshot['name'])
- if not snap_id:
- LOG.error('Snapshot %(name)s not found',
- {'name': snapshot['name']})
- raise exception.SnapshotNotFound(snapshot_id=snapshot['id'])
-
- self.vpsa.send_cmd('create_clone_from_snap',
- cg_name=cg_name,
- name=self.configuration.zadara_vol_name_template
- % volume['name'],
- snap_id=snap_id)
-
- if volume['size'] > snapshot['volume_size']:
- self.extend_volume(volume, volume['size'])
-
- def create_cloned_volume(self, volume, src_vref):
- """Creates a clone of the specified volume."""
-
- LOG.debug('Creating clone of volume: %s', src_vref['name'])
-
- # Retrieve the CG name for the base volume
- volume_name = (self.configuration.zadara_vol_name_template
- % src_vref['name'])
- cg_name = self._get_volume_cg_name(volume_name)
- if not cg_name:
- LOG.error('Volume %(name)s not found', {'name': volume_name})
- raise exception.VolumeNotFound(volume_id=volume['id'])
-
- self.vpsa.send_cmd('create_clone',
- cg_name=cg_name,
- name=self.configuration.zadara_vol_name_template
- % volume['name'])
-
- if volume['size'] > src_vref['size']:
- self.extend_volume(volume, volume['size'])
-
- def extend_volume(self, volume, new_size):
- """Extend an existing volume."""
- # Get volume name
- name = self.configuration.zadara_vol_name_template % volume['name']
- (vpsa_vol, size) = self._get_vpsa_volume_name_and_size(name)
- if not vpsa_vol:
- msg = (_('Volume %(name)s could not be found. '
- 'It might be already deleted') % {'name': name})
- LOG.error(msg)
- raise ZadaraVolumeNotFound(reason=msg)
-
- if new_size < size:
- raise exception.InvalidInput(
- reason=_('%(new_size)s < current size %(size)s') %
- {'new_size': new_size, 'size': size})
-
- expand_size = new_size - size
- self.vpsa.send_cmd('expand_volume',
- vpsa_vol=vpsa_vol,
- size=expand_size)
-
- def create_export(self, context, volume, vg=None):
- """Irrelevant for VPSA volumes. Export created during attachment."""
- pass
-
- def ensure_export(self, context, volume):
- """Irrelevant for VPSA volumes. Export created during attachment."""
- pass
-
- def remove_export(self, context, volume):
- """Irrelevant for VPSA volumes. Export removed during detach."""
- pass
-
- def initialize_connection(self, volume, connector):
- """Attach volume to initiator/host.
-
- During this call VPSA exposes volume to particular Initiator. It also
- creates a 'server' entity for Initiator (if it was not created before)
- All necessary connection information is returned, including auth data.
- Connection data (target, LUN) is not stored in the DB.
- """
- # First: Check Active controller: if not valid, raise exception
- ctrl = self._get_active_controller_details()
- if not ctrl:
- raise ZadaraVPSANoActiveController()
-
- # Get/Create server name for IQN
- initiator_name = connector['initiator']
- vpsa_srv = self._create_vpsa_server(initiator_name)
- if not vpsa_srv:
- raise ZadaraServerCreateFailure(name=initiator_name)
-
- # Get volume name
- name = self.configuration.zadara_vol_name_template % volume['name']
- vpsa_vol = self._get_vpsa_volume_name(name)
- if not vpsa_vol:
- raise exception.VolumeNotFound(volume_id=volume['id'])
-
- xml_tree = self.vpsa.send_cmd('list_vol_attachments',
- vpsa_vol=vpsa_vol)
- attach = self._xml_parse_helper(xml_tree, 'servers',
- ('name', vpsa_srv))
- # Attach volume to server
- if attach is None:
- self.vpsa.send_cmd('attach_volume',
- vpsa_srv=vpsa_srv,
- vpsa_vol=vpsa_vol)
-
- xml_tree = self.vpsa.send_cmd('list_vol_attachments',
- vpsa_vol=vpsa_vol)
- server = self._xml_parse_helper(xml_tree, 'servers',
- ('iqn', initiator_name))
- if server is None:
- raise ZadaraAttachmentsNotFound(name=name)
-
- target = server.findtext('target')
- lun = int(server.findtext('lun'))
- if None in [target, lun]:
- raise ZadaraInvalidAttachmentInfo(
- name=name,
- reason=_('target=%(target)s, lun=%(lun)s') %
- {'target': target, 'lun': lun})
-
- properties = {'target_discovered': False,
- 'target_portal': '%s:%s' % (ctrl['ip'], '3260'),
- 'target_iqn': target,
- 'target_lun': lun,
- 'volume_id': volume['id'],
- 'auth_method': 'CHAP',
- 'auth_username': ctrl['chap_user'],
- 'auth_password': ctrl['chap_passwd']}
-
- LOG.debug('Attach properties: %(properties)s',
- {'properties': strutils.mask_password(properties)})
- return {'driver_volume_type':
- ('iser' if (self.configuration.safe_get('zadara_use_iser'))
- else 'iscsi'), 'data': properties}
-
- def terminate_connection(self, volume, connector, **kwargs):
- """Detach volume from the initiator."""
-
- # Get server name for IQN
- if connector is None:
- # Detach volume from all servers
- # Get volume name
- name = self.configuration.zadara_vol_name_template % volume['name']
- vpsa_vol = self._get_vpsa_volume_name(name)
- if vpsa_vol:
- self._detach_vpsa_volume(vpsa_vol=vpsa_vol)
- return
- else:
- LOG.warning('Volume %s could not be found', name)
- raise exception.VolumeNotFound(volume_id=volume['id'])
-
- initiator_name = connector['initiator']
-
- vpsa_srv = self._get_server_name(initiator_name)
- if not vpsa_srv:
- raise ZadaraServerNotFound(name=initiator_name)
-
- # Get volume name
- name = self.configuration.zadara_vol_name_template % volume['name']
- vpsa_vol = self._get_vpsa_volume_name(name)
- if not vpsa_vol:
- raise exception.VolumeNotFound(volume_id=volume['id'])
-
- # Detach volume from server
- self._detach_vpsa_volume(vpsa_vol=vpsa_vol, vpsa_srv=vpsa_srv)
-
- def _get_servers_attached_to_volume(self, vpsa_vol):
- """Return all servers attached to volume."""
- xml_tree = self.vpsa.send_cmd('list_vol_attachments',
- vpsa_vol=vpsa_vol)
- list_servers = self._xml_parse_helper(xml_tree, 'servers',
- ('iqn', None), first=False)
- return list_servers or []
-
- def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
- LOG.debug("Updating volume stats")
- data = {}
- backend_name = self.configuration.safe_get('volume_backend_name')
- storage_protocol = ('iSER' if
- (self.configuration.safe_get('zadara_use_iser'))
- else 'iSCSI')
- data["volume_backend_name"] = backend_name or self.__class__.__name__
- data["vendor_name"] = 'Zadara Storage'
- data["driver_version"] = self.VERSION
- data["storage_protocol"] = storage_protocol
- data['reserved_percentage'] = self.configuration.reserved_percentage
- data['QoS_support'] = False
-
- (total, free) = self._get_pool_capacity(self.configuration.
- zadara_vpsa_poolname)
- data['total_capacity_gb'] = total
- data['free_capacity_gb'] = free
-
- self._stats = data
diff --git a/cinder/volume/drivers/zadara/__init__.py b/cinder/volume/drivers/zadara/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/volume/drivers/zadara/common.py b/cinder/volume/drivers/zadara/common.py
new file mode 100644
index 00000000000..e5d679f1ac1
--- /dev/null
+++ b/cinder/volume/drivers/zadara/common.py
@@ -0,0 +1,517 @@
+# Copyright (c) 2020 Zadara Storage, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import re
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import requests
+
+LOG = logging.getLogger(__name__)
+
+# Number of seconds to wait for a response to a request sent to
+# the VPSA before the request is timed out.
+# Setting it to 300 seconds initially.
+vpsa_timeout = 300
+
+
+# Container class for the exceptions shared by the Zadara drivers;
+# each driver translates these into its own specific exceptions.
+class CommonException(Exception):
+ def __init__(self):
+ pass
+
+ class UnknownCmd(Exception):
+ def __init__(self, cmd):
+ self.cmd = cmd
+
+ class BadHTTPResponseStatus(Exception):
+ def __init__(self, status):
+ self.status = status
+
+ class FailedCmdWithDump(Exception):
+ def __init__(self, status, data):
+ self.status = status
+ self.data = data
+
+ class SessionRequestException(Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ class ZadaraInvalidAccessKey(Exception):
+ pass
+
+
+exception = CommonException()
+
+
+zadara_opts = [
+ cfg.HostAddressOpt('zadara_vpsa_host',
+ default=None,
+ help='VPSA - Management Host name or IP address'),
+ cfg.PortOpt('zadara_vpsa_port',
+ default=None,
+ help='VPSA - Port number'),
+ cfg.BoolOpt('zadara_vpsa_use_ssl',
+ default=False,
+ help='VPSA - Use SSL connection'),
+ cfg.BoolOpt('zadara_ssl_cert_verify',
+ default=True,
+ help='If set to True the http client will validate the SSL '
+ 'certificate of the VPSA endpoint.'),
+ cfg.StrOpt('zadara_access_key',
+ default=None,
+ help='VPSA access key',
+ secret=True),
+ cfg.StrOpt('zadara_vpsa_poolname',
+ default=None,
+ help='VPSA - Storage Pool assigned for volumes'),
+ cfg.BoolOpt('zadara_vol_encrypt',
+ default=False,
+ help='VPSA - Default encryption policy for volumes. '
+ 'If the option is neither configured nor provided '
+ 'as metadata, the VPSA will inherit the default value.'),
+ cfg.BoolOpt('zadara_gen3_vol_dedupe',
+ default=False,
+ help='VPSA - Enable deduplication for volumes. '
+ 'If the option is neither configured nor provided '
+ 'as metadata, the VPSA will inherit the default value.'),
+ cfg.BoolOpt('zadara_gen3_vol_compress',
+ default=False,
+ help='VPSA - Enable compression for volumes. '
+ 'If the option is neither configured nor provided '
+ 'as metadata, the VPSA will inherit the default value.'),
+ cfg.BoolOpt('zadara_default_snap_policy',
+ default=False,
+ help="VPSA - Attach snapshot policy for volumes. "
+ "If the option is neither configured nor provided "
+ "as metadata, the VPSA will inherit the default value.")]
+
+
+# Class used to connect and execute the commands on
+# Zadara Virtual Private Storage Array (VPSA).
+class ZadaraVPSAConnection(object):
+ """Executes driver commands on VPSA."""
+
+ def __init__(self, conf, driver_ssl_cert_path, block):
+ self.conf = conf
+ self.access_key = conf.zadara_access_key
+ if not self.access_key:
+ raise exception.ZadaraInvalidAccessKey()
+ self.driver_ssl_cert_path = driver_ssl_cert_path
+ # Choose the volume type of either block or file-type
+ # that will help to filter volumes.
+ self.vol_type_str = 'showonlyblock' if block else 'showonlyfile'
+ # Dictionary of applicable VPSA commands in the following format:
+ # 'command': (method, API_URL, {optional parameters})
+ self.vpsa_commands = {
+ # Volume operations
+ 'create_volume': lambda kwargs: (
+ 'POST',
+ '/api/volumes.json',
+ {'name': kwargs.get('name'),
+ 'capacity': kwargs.get('size'),
+ 'pool': self.conf.zadara_vpsa_poolname,
+ 'block': 'YES'
+ if self.vol_type_str == 'showonlyblock'
+ else 'NO',
+ 'thin': 'YES',
+ 'crypt': 'YES'
+ if self.conf.zadara_vol_encrypt else 'NO',
+ 'compress': 'YES'
+ if self.conf.zadara_gen3_vol_compress else 'NO',
+ 'dedupe': 'YES'
+ if self.conf.zadara_gen3_vol_dedupe else 'NO',
+ 'attachpolicies': 'NO'
+ if not self.conf.zadara_default_snap_policy
+ else 'YES'}),
+ 'delete_volume': lambda kwargs: (
+ 'DELETE',
+ '/api/volumes/%s.json' % kwargs.get('vpsa_vol'),
+ {'force': 'YES'}),
+ 'expand_volume': lambda kwargs: (
+ 'POST',
+ '/api/volumes/%s/expand.json'
+ % kwargs.get('vpsa_vol'),
+ {'capacity': kwargs.get('size')}),
+ 'rename_volume': lambda kwargs: (
+ 'POST',
+ '/api/volumes/%s/rename.json'
+ % kwargs.get('vpsa_vol'),
+ {'new_name': kwargs.get('new_name')}),
+ # Snapshot operations
+ # Snapshot request is triggered for a single volume though the
+ # API call implies that snapshot is triggered for CG (legacy API).
+ 'create_snapshot': lambda kwargs: (
+ 'POST',
+ '/api/consistency_groups/%s/snapshots.json'
+ % kwargs.get('cg_name'),
+ {'display_name': kwargs.get('snap_name')}),
+ 'delete_snapshot': lambda kwargs: (
+ 'DELETE',
+ '/api/snapshots/%s.json'
+ % kwargs.get('snap_id'),
+ {}),
+ 'rename_snapshot': lambda kwargs: (
+ 'POST',
+ '/api/snapshots/%s/rename.json'
+ % kwargs.get('snap_id'),
+ {'newname': kwargs.get('new_name')}),
+ 'create_clone_from_snap': lambda kwargs: (
+ 'POST',
+ '/api/consistency_groups/%s/clone.json'
+ % kwargs.get('cg_name'),
+ {'name': kwargs.get('name'),
+ 'snapshot': kwargs.get('snap_id')}),
+ 'create_clone': lambda kwargs: (
+ 'POST',
+ '/api/consistency_groups/%s/clone.json'
+ % kwargs.get('cg_name'),
+ {'name': kwargs.get('name')}),
+ # Server operations
+ 'create_server': lambda kwargs: (
+ 'POST',
+ '/api/servers.json',
+ {'iqn': kwargs.get('iqn'),
+ 'iscsi': kwargs.get('iscsi_ip'),
+ 'display_name': kwargs.get('iqn')
+ if kwargs.get('iqn')
+ else kwargs.get('iscsi_ip')}),
+ # Attach/Detach operations
+ 'attach_volume': lambda kwargs: (
+ 'POST',
+ '/api/servers/%s/volumes.json'
+ % kwargs.get('vpsa_srv'),
+ {'volume_name[]': kwargs.get('vpsa_vol'),
+ 'access_type': kwargs.get('share_proto'),
+ 'readonly': kwargs.get('read_only'),
+ 'force': 'YES'}),
+ 'detach_volume': lambda kwargs: (
+ 'POST',
+ '/api/volumes/%s/detach.json'
+ % kwargs.get('vpsa_vol'),
+ {'server_name[]': kwargs.get('vpsa_srv'),
+ 'force': 'YES'}),
+ # Update volume comment
+ 'update_volume': lambda kwargs: (
+ 'POST',
+ '/api/volumes/%s/update_comment.json'
+ % kwargs.get('vpsa_vol'),
+ {'new_comment': kwargs.get('new_comment')}),
+
+ # Get operations
+ 'list_volumes': lambda kwargs: (
+ 'GET',
+ '/api/volumes.json?%s=YES' % self.vol_type_str,
+ {}),
+ 'get_volume': lambda kwargs: (
+ 'GET',
+ '/api/volumes/%s.json' % kwargs.get('vpsa_vol'),
+ {}),
+ 'get_volume_by_name': lambda kwargs: (
+ 'GET',
+ '/api/volumes.json?display_name=%s'
+ % kwargs.get('display_name'),
+ {}),
+ 'get_pool': lambda kwargs: (
+ 'GET',
+ '/api/pools/%s.json' % kwargs.get('pool_name'),
+ {}),
+ 'list_controllers': lambda kwargs: (
+ 'GET',
+ '/api/vcontrollers.json',
+ {}),
+ 'list_servers': lambda kwargs: (
+ 'GET',
+ '/api/servers.json',
+ {}),
+ 'list_vol_snapshots': lambda kwargs: (
+ 'GET',
+ '/api/consistency_groups/%s/snapshots.json'
+ % kwargs.get('cg_name'),
+ {}),
+ 'list_vol_attachments': lambda kwargs: (
+ 'GET',
+ '/api/volumes/%s/servers.json'
+ % kwargs.get('vpsa_vol'),
+ {}),
+ 'list_snapshots': lambda kwargs: (
+ 'GET',
+ '/api/snapshots.json',
+ {}),
+ # Put operations
+ 'change_export_name': lambda kwargs: (
+ 'PUT',
+ '/api/volumes/%s/export_name.json'
+ % kwargs.get('vpsa_vol'),
+ {'exportname': kwargs.get('exportname')})}
+
+ def _generate_vpsa_cmd(self, cmd, **kwargs):
+ """Generate command to be sent to VPSA."""
+ try:
+ method, url, params = self.vpsa_commands[cmd](kwargs)
+ # Populate the metadata for the volume creation
+ metadata = kwargs.get('metadata')
+ if metadata:
+ for key, value in metadata.items():
+ params[key] = value
+ except KeyError:
+ raise exception.UnknownCmd(cmd=cmd)
+
+ if method == 'GET':
+ params = dict(page=1, start=0, limit=0)
+ body = None
+
+ elif method in ['DELETE', 'POST', 'PUT']:
+ body = params
+ params = None
+
+ else:
+ msg = ('Method %(method)s is not defined' % {'method': method})
+ LOG.error(msg)
+ raise AssertionError(msg)
+
+ # 'access_key' was generated using username and password
+ # or it was taken from the input file
+ headers = {'X-Access-Key': self.access_key}
+
+ return method, url, params, body, headers
+
+ def send_cmd(self, cmd, **kwargs):
+ """Send command to VPSA Controller."""
+
+ if not self.access_key:
+ raise exception.ZadaraInvalidAccessKey()
+
+ method, url, params, body, headers = self._generate_vpsa_cmd(cmd,
+ **kwargs)
+ LOG.debug('Invoking %(cmd)s using %(method)s request.',
+ {'cmd': cmd, 'method': method})
+
+ host = self._get_target_host(self.conf.zadara_vpsa_host)
+ port = int(self.conf.zadara_vpsa_port)
+
+ protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http"
+ if protocol == "https":
+ if not self.conf.zadara_ssl_cert_verify:
+ verify = False
+ else:
+ verify = (self.driver_ssl_cert_path
+ if self.driver_ssl_cert_path else True)
+ else:
+ verify = False
+
+ if port:
+ api_url = "%s://%s:%d%s" % (protocol, host, port, url)
+ else:
+ api_url = "%s://%s%s" % (protocol, host, url)
+
+ try:
+ with requests.Session() as session:
+ session.headers.update(headers)
+ response = session.request(method, api_url, params=params,
+ data=body, headers=headers,
+ verify=verify, timeout=vpsa_timeout)
+ except requests.exceptions.RequestException as e:
+ msg = ('Exception: %s') % e
+ raise exception.SessionRequestException(msg=msg)
+
+ if response.status_code != 200:
+ raise exception.BadHTTPResponseStatus(
+ status=response.status_code)
+
+ data = response.content
+ json_data = json.loads(data)
+ response = json_data['response']
+ status = int(response['status'])
+ if status == 5:
+ # Invalid Credentials
+ raise exception.ZadaraInvalidAccessKey()
+
+ if status != 0:
+ raise exception.FailedCmdWithDump(status=status, data=data)
+
+ LOG.debug('Operation completed with status code %(status)s',
+ {'status': status})
+ return response
+
+ def _get_target_host(self, vpsa_host):
+ """Helper for target host formatting."""
+ ipv6_without_brackets = ':' in vpsa_host and vpsa_host[-1] != ']'
+ if ipv6_without_brackets:
+ return ('[%s]' % vpsa_host)
+ return ('%s' % vpsa_host)
+
+ def _get_active_controller_details(self):
+ """Return details of VPSA's active controller."""
+ data = self.send_cmd('list_controllers')
+ ctrl = None
+ vcontrollers = data.get('vcontrollers', [])
+ for controller in vcontrollers:
+ if controller['state'] == 'active':
+ ctrl = controller
+ break
+
+ if ctrl is not None:
+ target_ip = (ctrl['iscsi_ipv6'] if
+ ctrl['iscsi_ipv6'] else
+ ctrl['iscsi_ip'])
+ return dict(target=ctrl['target'],
+ ip=target_ip,
+ chap_user=ctrl['vpsa_chap_user'],
+ chap_passwd=ctrl['vpsa_chap_secret'])
+ return None
+
+ def _check_access_key_validity(self):
+ """Check VPSA access key"""
+ if not self.access_key:
+ raise exception.ZadaraInvalidAccessKey()
+ active_ctrl = self._get_active_controller_details()
+ if active_ctrl is None:
+ raise exception.ZadaraInvalidAccessKey()
+
+ def _get_vpsa_volume(self, name):
+ """Returns a single vpsa volume based on the display name"""
+ volume = None
+ display_name = name
+ if re.search(r"\s", name):
+ display_name = re.split(r"\s", name)[0]
+ data = self.send_cmd('get_volume_by_name',
+ display_name=display_name)
+ if data['status'] != 0:
+ return None
+ volumes = data['volumes']
+
+ for vol in volumes:
+ if vol['display_name'] == name:
+ volume = vol
+ break
+ return volume
+
+ def _get_vpsa_volume_by_id(self, vpsa_vol):
+ """Returns a single vpsa volume based on name"""
+ data = self.send_cmd('get_volume', vpsa_vol=vpsa_vol)
+ return data['volume']
+
+ def _get_volume_cg_name(self, name):
+ """Return name of the consistency group for the volume.
+
+        cg-name is a volume unique identifier (legacy attribute)
+ and not consistency group as it may imply.
+ """
+ volume = self._get_vpsa_volume(name)
+ if volume is not None:
+ return volume['cg_name']
+
+ return None
+
+ def _get_all_vpsa_snapshots(self):
+ """Returns snapshots from all vpsa volumes"""
+ data = self.send_cmd('list_snapshots')
+ return data['snapshots']
+
+ def _get_all_vpsa_volumes(self):
+ """Returns all vpsa block volumes from the configured pool"""
+ data = self.send_cmd('list_volumes')
+        # FIXME: Workaround to filter volumes belonging to the given pool.
+        # Remove this once the API is fixed to filter based on pools.
+        # This API today does not have the virtual_capacity field.
+ volumes = []
+
+ for volume in data['volumes']:
+ if volume['pool_name'] == self.conf.zadara_vpsa_poolname:
+ volumes.append(volume)
+
+ return volumes
+
+ def _get_server_name(self, initiator, share):
+ """Return VPSA's name for server object.
+
+ 'share' will be true to search for filesystem volumes
+ """
+ data = self.send_cmd('list_servers')
+ servers = data.get('servers', [])
+ for server in servers:
+ if share:
+ if server['iscsi_ip'] == initiator:
+ return server['name']
+ else:
+ if server['iqn'] == initiator:
+ return server['name']
+ return None
+
+ def _create_vpsa_server(self, iqn=None, iscsi_ip=None):
+ """Create server object within VPSA (if doesn't exist)."""
+ initiator = iscsi_ip if iscsi_ip else iqn
+ share = True if iscsi_ip else False
+ vpsa_srv = self._get_server_name(initiator, share)
+ if not vpsa_srv:
+ data = self.send_cmd('create_server', iqn=iqn, iscsi_ip=iscsi_ip)
+ if data['status'] != 0:
+ return None
+ vpsa_srv = data['server_name']
+ return vpsa_srv
+
+ def _get_servers_attached_to_volume(self, vpsa_vol):
+ """Return all servers attached to volume."""
+ servers = vpsa_vol.get('server_ext_names')
+ list_servers = []
+ if servers:
+ list_servers = servers.split(',')
+ return list_servers
+
+ def _detach_vpsa_volume(self, vpsa_vol, vpsa_srv=None):
+ """Detach volume from all attached servers."""
+ if vpsa_srv:
+ list_servers_ids = [vpsa_srv]
+ else:
+ list_servers_ids = self._get_servers_attached_to_volume(vpsa_vol)
+
+ for server_id in list_servers_ids:
+ # Detach volume from server
+ self.send_cmd('detach_volume', vpsa_srv=server_id,
+ vpsa_vol=vpsa_vol['name'])
+
+ def _get_volume_snapshots(self, cg_name):
+ """Get snapshots in the consistency group"""
+ data = self.send_cmd('list_vol_snapshots', cg_name=cg_name)
+ snapshots = data.get('snapshots', [])
+ return snapshots
+
+ def _get_snap_id(self, cg_name, snap_name):
+ """Return snapshot ID for particular volume."""
+ snapshots = self._get_volume_snapshots(cg_name)
+ for snap_vol in snapshots:
+ if snap_vol['display_name'] == snap_name:
+ return snap_vol['name']
+
+ return None
+
+ def _get_pool_capacity(self, pool_name):
+ """Return pool's total and available capacities."""
+ data = self.send_cmd('get_pool', pool_name=pool_name)
+ pool = data.get('pool')
+ if pool is not None:
+ total = int(pool['capacity'])
+ free = int(pool['available_capacity'])
+ provisioned = int(pool['provisioned_capacity'])
+ LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free, '
+ '%(provisioned)sGB provisioned',
+ {'name': pool_name, 'total': total,
+ 'free': free, 'provisioned': provisioned})
+ return total, free, provisioned
+
+ return 'unknown', 'unknown', 'unknown'
diff --git a/cinder/volume/drivers/zadara/exception.py b/cinder/volume/drivers/zadara/exception.py
new file mode 100644
index 00000000000..a1ccd79050e
--- /dev/null
+++ b/cinder/volume/drivers/zadara/exception.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2020 Zadara Storage, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Zadara Cinder driver exception handling.
+"""
+
+from cinder import exception
+from cinder.i18n import _
+
+
+class ZadaraSessionRequestException(exception.VolumeDriverException):
+ message = _("%(msg)s")
+
+
+class ZadaraCinderInvalidAccessKey(exception.VolumeDriverException):
+ message = "Invalid VPSA access key"
+
+
+class ZadaraVPSANoActiveController(exception.VolumeDriverException):
+ message = _("Unable to find any active VPSA controller")
+
+
+class ZadaraVolumeNotFound(exception.VolumeDriverException):
+ message = "%(reason)s"
+
+
+class ZadaraServerCreateFailure(exception.VolumeDriverException):
+ message = _("Unable to create server object for initiator %(name)s")
+
+
+class ZadaraAttachmentsNotFound(exception.VolumeDriverException):
+ message = _("Failed to retrieve attachments for volume %(name)s")
+
+
+class ZadaraInvalidAttachmentInfo(exception.VolumeDriverException):
+ message = _("Invalid attachment info for volume %(name)s: %(reason)s")
+
+
+class ZadaraServerNotFound(exception.VolumeDriverException):
+ message = _("Unable to find server object for initiator %(name)s")
diff --git a/cinder/volume/drivers/zadara/zadara.py b/cinder/volume/drivers/zadara/zadara.py
new file mode 100644
index 00000000000..e97468a6fb3
--- /dev/null
+++ b/cinder/volume/drivers/zadara/zadara.py
@@ -0,0 +1,729 @@
+# Copyright (c) 2019 Zadara Storage, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Volume driver for Zadara Virtual Private Storage Array (VPSA).
+
+This driver requires VPSA with API version 15.07 or higher.
+"""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import strutils
+import six
+
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import interface
+from cinder.objects import fields
+from cinder.volume import configuration
+from cinder.volume import driver
+from cinder.volume.drivers.zadara import common
+from cinder.volume.drivers.zadara import exception as zadara_exception
+from cinder.volume import volume_utils
+
+CONF = cfg.CONF
+CONF.register_opts(common.zadara_opts, group=configuration.SHARED_CONF_GROUP)
+
+LOG = logging.getLogger(__name__)
+
+cinder_opts = [
+ cfg.BoolOpt('zadara_use_iser',
+ default=True,
+ help='VPSA - Use ISER instead of iSCSI'),
+ cfg.StrOpt('zadara_vol_name_template',
+ default='OS_%s',
+ help='VPSA - Default template for VPSA volume names')]
+
+
+@interface.volumedriver
+class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
+ """Zadara VPSA iSCSI/iSER volume driver.
+
+ .. code-block:: none
+
+ Version history:
+ 15.07 - Initial driver
+ 16.05 - Move from httplib to requests
+ 19.08 - Add API access key authentication option
+ 20.01 - Move to json format from xml. Provide manage/unmanage
+ volume/snapshot feature
+ 20.12-01 - Merging with the common code for all the openstack drivers
+ 20.12-02 - Common code changed as part of fixing
+ Zadara github issue #18723
+ 20.12-03 - Adding the metadata support while creating volume to
+ configure vpsa.
+ 20.12-20 - IPv6 connectivity support for Cinder driver
+ 20.12-24 - Optimizing get manageable volumes and snapshots
+ """
+
+ VERSION = '20.12-24'
+
+ # ThirdPartySystems wiki page
+ CI_WIKI_NAME = "ZadaraStorage_VPSA_CI"
+
+ def __init__(self, *args, **kwargs):
+ super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs)
+ self.vpsa = None
+ self.configuration.append_config_values(common.zadara_opts)
+ self.configuration.append_config_values(cinder_opts)
+ # The valid list of volume options that can be specified
+ # as the metadata while creating cinder volume
+ self.vol_options = ['crypt', 'compress',
+ 'dedupe', 'attachpolicies']
+
+ @staticmethod
+ def get_driver_options():
+ driver_opts = []
+ driver_opts.extend(common.zadara_opts)
+ driver_opts.extend(cinder_opts)
+ return driver_opts
+
+ def _check_access_key_validity(self):
+ try:
+ self.vpsa._check_access_key_validity()
+ except common.exception.ZadaraInvalidAccessKey:
+ raise zadara_exception.ZadaraCinderInvalidAccessKey()
+
+ def do_setup(self, context):
+ """Any initialization the volume driver does while starting.
+
+ Establishes initial connection with VPSA and retrieves access_key.
+ Need to pass driver_ssl_cert_path here (and not fetch it from the
+ config opts directly in common code), because this config option is
+ different for different drivers and so cannot be figured in the
+ common code.
+ """
+ driver_ssl_cert_path = self.configuration.driver_ssl_cert_path
+ self.vpsa = common.ZadaraVPSAConnection(self.configuration,
+ driver_ssl_cert_path, True)
+ self._check_access_key_validity()
+
+ def check_for_setup_error(self):
+ """Returns an error (exception) if prerequisites aren't met."""
+ self._check_access_key_validity()
+
+ def local_path(self, volume):
+ """Return local path to existing local volume."""
+ raise NotImplementedError()
+
+ def _get_zadara_vol_template_name(self, vol_name):
+ return self.configuration.zadara_vol_name_template % vol_name
+
+ def _get_vpsa_volume(self, volume, raise_exception=True):
+ vpsa_volume = None
+ if volume.provider_location:
+ vpsa_volume = (self.vpsa._get_vpsa_volume_by_id(
+ volume.provider_location))
+ else:
+ vol_name = self._get_zadara_vol_template_name(volume.name)
+ vpsa_volume = self.vpsa._get_vpsa_volume(vol_name)
+
+ if not vpsa_volume:
+ vol_name = self._get_zadara_vol_template_name(volume.name)
+ msg = (_('Backend Volume %(name)s not found') % {'name': vol_name})
+ if raise_exception:
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+ LOG.warning(msg)
+ return vpsa_volume
+
+ def vpsa_send_cmd(self, cmd, **kwargs):
+ try:
+ response = self.vpsa.send_cmd(cmd, **kwargs)
+ except common.exception.UnknownCmd as e:
+ raise cinder_exception.UnknownCmd(cmd=e.cmd)
+ except common.exception.SessionRequestException as e:
+ raise zadara_exception.ZadaraSessionRequestException(msg=e.msg)
+ except common.exception.BadHTTPResponseStatus as e:
+ raise cinder_exception.BadHTTPResponseStatus(status=e.status)
+ except common.exception.FailedCmdWithDump as e:
+ raise cinder_exception.FailedCmdWithDump(status=e.status,
+ data=e.data)
+ except common.exception.ZadaraInvalidAccessKey:
+ raise zadara_exception.ZadaraCinderInvalidAccessKey()
+ return response
+
+ def _validate_existing_ref(self, existing_ref):
+ """Validates existing ref"""
+ if not existing_ref.get('name'):
+ raise cinder_exception.ManageExistingInvalidReference(
+ existing_ref=existing_ref,
+ reason=_("manage_existing requires a 'name'"
+ " key to identify an existing volume."))
+
+ def _get_volume_metadata(self, volume):
+ if 'metadata' in volume:
+ return volume.metadata
+ if 'volume_metadata' in volume:
+ metadata = volume.volume_metadata
+ return {m['key']: m['value'] for m in metadata}
+ return {}
+
+ def is_valid_metadata(self, metadata):
+ LOG.debug('Metadata while creating volume: %(metadata)s',
+ {'metadata': metadata})
+ # Check the values allowed for provided metadata
+ return all(value in ('YES', 'NO')
+ for key, value in metadata.items()
+ if key in self.vol_options)
+
+ def create_volume(self, volume):
+ """Create volume."""
+ vol_name = self._get_zadara_vol_template_name(volume.name)
+
+ # Collect the volume metadata if any provided and validate it
+ metadata = self._get_volume_metadata(volume)
+ if not self.is_valid_metadata(metadata):
+ msg = (_('Invalid metadata for Volume %s') % vol_name)
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ data = self.vpsa_send_cmd('create_volume',
+ name=vol_name,
+ size=volume.size,
+ metadata=metadata)
+
+ return {'provider_location': data.get('vol_name')}
+
+ def delete_volume(self, volume):
+ """Delete volume.
+
+ Return ok if doesn't exist. Auto detach from all servers.
+ """
+ vpsa_volume = self._get_vpsa_volume(volume, False)
+ if not vpsa_volume:
+ return
+
+ self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume)
+
+ # Delete volume
+ self.vpsa_send_cmd('delete_volume', vpsa_vol=vpsa_volume['name'])
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot."""
+
+ LOG.debug('Create snapshot: %s', snapshot.name)
+
+ vpsa_volume = self._get_vpsa_volume(snapshot.volume)
+ # Retrieve the CG name for the base volume
+ cg_name = vpsa_volume['cg_name']
+ data = self.vpsa_send_cmd('create_snapshot',
+ cg_name=cg_name,
+ snap_name=snapshot.name)
+
+ return {'provider_location': data.get('snapshot_name')}
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+
+ LOG.debug('Delete snapshot: %s', snapshot.name)
+
+ vpsa_volume = self._get_vpsa_volume(snapshot.volume, False)
+ if not vpsa_volume:
+ # If the volume isn't present, then don't attempt to delete
+ return
+
+ # Retrieve the CG name for the base volume
+ cg_name = vpsa_volume['cg_name']
+ snap_id = self.vpsa._get_snap_id(cg_name, snapshot.name)
+ if not snap_id:
+ # If the snapshot isn't present, then don't attempt to delete
+ LOG.warning('snapshot: snapshot %s not found, '
+ 'skipping delete operation', snapshot.name)
+ return
+
+ self.vpsa_send_cmd('delete_snapshot',
+ snap_id=snap_id)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot.
+
+        :raises SnapshotNotFound: if the snapshot's base volume or the
+            snapshot itself no longer exists on the VPSA.
+        :returns: model update dict with the new backend volume name in
+            ``provider_location``.
+        """
+        LOG.debug('Creating volume from snapshot: %s', snapshot.name)
+
+        vpsa_volume = self._get_vpsa_volume(snapshot.volume, False)
+        if not vpsa_volume:
+            LOG.error('Snapshot %(name)s not found.',
+                      {'name': snapshot.name})
+            raise cinder_exception.SnapshotNotFound(snapshot_id=snapshot.id)
+
+        # Retrieve the CG name for the base volume
+        cg_name = vpsa_volume['cg_name']
+        snap_id = self.vpsa._get_snap_id(cg_name, snapshot.name)
+        if not snap_id:
+            LOG.error('Snapshot %(name)s not found',
+                      {'name': snapshot.name})
+            raise cinder_exception.SnapshotNotFound(snapshot_id=snapshot.id)
+
+        # Clone under the cinder naming template so later lookups by
+        # display name succeed.
+        volume_name = self._get_zadara_vol_template_name(volume.name)
+        self.vpsa_send_cmd('create_clone_from_snap',
+                           cg_name=cg_name,
+                           name=volume_name,
+                           snap_id=snap_id)
+
+        # The clone inherits the snapshot's size; grow it afterwards when
+        # the requested cinder volume is larger.
+        vpsa_volume = self._get_vpsa_volume(volume)
+        if volume.size > snapshot.volume_size:
+            self.extend_volume(volume, volume.size)
+        return {'provider_location': vpsa_volume.get('name')}
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume.
+
+        :param volume: new cinder volume to create.
+        :param src_vref: source cinder volume to clone from (raising
+            lookup — must exist on the VPSA).
+        :returns: model update dict with the clone's backend name in
+            ``provider_location``.
+        """
+        LOG.debug('Creating clone of volume: %s', src_vref.name)
+
+        vpsa_volume = self._get_vpsa_volume(src_vref)
+        # Retrieve the CG name for the base volume
+        cg_name = vpsa_volume['cg_name']
+        volume_name = self._get_zadara_vol_template_name(volume.name)
+        self.vpsa_send_cmd('create_clone',
+                           cg_name=cg_name,
+                           name=volume_name)
+
+        # Clone starts at the source size; extend if a larger volume was
+        # requested.
+        vpsa_volume = self._get_vpsa_volume(volume)
+        if volume.size > src_vref.size:
+            self.extend_volume(volume, volume.size)
+        return {'provider_location': vpsa_volume.get('name')}
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume.
+
+        :param new_size: requested total size in GiB.
+        :raises InvalidInput: when ``new_size`` is below the current
+            backend capacity (shrinking is not supported).
+        """
+        # Get volume
+        vpsa_volume = self._get_vpsa_volume(volume)
+        size = vpsa_volume['virtual_capacity']
+        if new_size < size:
+            raise cinder_exception.InvalidInput(
+                reason=_('%(new_size)s < current size %(size)s') %
+                {'new_size': new_size, 'size': size})
+
+        # NOTE(review): new_size == size sends an expand of 0 to the VPSA
+        # — confirm the backend treats that as a no-op.
+        expand_size = new_size - size
+        self.vpsa_send_cmd('expand_volume',
+                           vpsa_vol=vpsa_volume['name'],
+                           size=expand_size)
+
+    def create_export(self, context, volume, vg=None):
+        """Irrelevant for VPSA volumes. Export created during attachment."""
+        # Intentional no-op: export happens in initialize_connection.
+        pass
+
+    def ensure_export(self, context, volume):
+        """Irrelevant for VPSA volumes. Export created during attachment."""
+        # Intentional no-op: nothing to re-establish after a restart.
+        pass
+
+    def remove_export(self, context, volume):
+        """Irrelevant for VPSA volumes. Export removed during detach."""
+        # Intentional no-op: teardown happens in terminate_connection.
+        pass
+
+ def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
+ sort_keys, sort_dirs):
+ """List volumes on the backend available for management by Cinder"""
+ # Get all vpsa volumes
+ all_vpsa_volumes = self.vpsa._get_all_vpsa_volumes()
+
+ # Create a dictionary of existing volumes
+ existing_vols = {}
+ for cinder_vol in cinder_volumes:
+ if cinder_vol.provider_location:
+ volumes = (list(filter(lambda volume:
+ (volume['name'] == cinder_vol.provider_location),
+ all_vpsa_volumes)))
+ else:
+ cinder_name = (self._get_zadara_vol_template_name(
+ cinder_vol.name))
+ volumes = (list(filter(lambda volume:
+ (volume['display_name'] == cinder_name),
+ all_vpsa_volumes)))
+ for volume in volumes:
+ existing_vols[volume['name']] = cinder_vol.id
+
+ # Filter out all volumes already attached to any server
+ volumes_in_use = {}
+ volumes_not_available = {}
+ for volume in all_vpsa_volumes:
+ if volume['name'] in existing_vols:
+ continue
+
+ if volume['status'] == 'In-use':
+ volumes_in_use[volume['name']] =\
+ self.vpsa._get_servers_attached_to_volume(volume)
+ continue
+
+ if volume['status'] != 'Available':
+ volumes_not_available[volume['name']] = volume['display_name']
+ continue
+
+ manageable_vols = []
+ for vpsa_volume in all_vpsa_volumes:
+ vol_name = vpsa_volume['name']
+ vol_display_name = vpsa_volume['display_name']
+ cinder_id = existing_vols.get(vol_name)
+ not_safe_msgs = []
+
+ if vol_name in volumes_in_use:
+ host_list = volumes_in_use[vol_name]
+ not_safe_msgs.append(_('Volume connected to host(s) %s')
+ % host_list)
+
+ elif vol_name in volumes_not_available:
+ not_safe_msgs.append(_('Volume not available'))
+
+ if cinder_id:
+ not_safe_msgs.append(_('Volume already managed'))
+
+ is_safe = (len(not_safe_msgs) == 0)
+ reason_not_safe = ' && '.join(not_safe_msgs)
+
+ manageable_vols.append({
+ 'reference': {'name': vol_display_name},
+ 'size': vpsa_volume['virtual_capacity'],
+ 'safe_to_manage': is_safe,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_id,
+ })
+
+ return volume_utils.paginate_entries_list(
+ manageable_vols, marker, limit, offset, sort_keys, sort_dirs)
+
+    def manage_existing(self, volume, existing_ref):
+        """Bring an existing volume into cinder management.
+
+        :param existing_ref: dict with a ``name`` key identifying the
+            backend volume by its display name.
+        :raises ManageExistingInvalidReference: if the referenced volume
+            is missing or not in the 'Available' state.
+        :raises VolumeDriverException: if the cinder-template name the
+            volume would be renamed to is already taken.
+        """
+        self._validate_existing_ref(existing_ref)
+
+        # Check if the volume exists in vpsa
+        name = existing_ref['name']
+        vpsa_volume = self.vpsa._get_vpsa_volume(name)
+        if not vpsa_volume:
+            msg = (_('Volume %(name)s could not be found. '
+                     'It might be already deleted') % {'name': name})
+            LOG.error(msg)
+            raise cinder_exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=msg)
+
+        # Check if the volume is available
+        if vpsa_volume['status'] != 'Available':
+            msg = (_('Existing volume %(name)s is not available')
+                   % {'name': name})
+            LOG.error(msg)
+            raise cinder_exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=msg)
+
+        # Rename the volume to cinder specified name
+        new_name = self._get_zadara_vol_template_name(volume.name)
+        new_vpsa_volume = self.vpsa._get_vpsa_volume(new_name)
+        if new_vpsa_volume:
+            msg = (_('Volume %(new_name)s already exists')
+                   % {'new_name': new_name})
+            LOG.error(msg)
+            raise cinder_exception.VolumeDriverException(message=msg)
+
+        data = self.vpsa_send_cmd('rename_volume',
+                                  vpsa_vol=vpsa_volume['name'],
+                                  new_name=new_name)
+        return {'provider_location': data.get('vol_name')}
+
+ def manage_existing_get_size(self, volume, existing_ref):
+ """Return size of volume to be managed by manage_existing"""
+ # Check if the volume exists in vpsa
+ self._validate_existing_ref(existing_ref)
+ name = existing_ref['name']
+ vpsa_volume = self.vpsa._get_vpsa_volume(name)
+ if not vpsa_volume:
+ msg = (_('Volume %(name)s could not be found. '
+ 'It might be already deleted') % {'name': volume.name})
+ LOG.error(msg)
+ raise cinder_exception.ManageExistingInvalidReference(
+ existing_ref=existing_ref,
+ reason=msg)
+
+ # Return the size of the volume
+ return vpsa_volume['virtual_capacity']
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management"""
+        # Intentional no-op: the backend volume is left untouched.
+        pass
+
+ def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
+ sort_keys, sort_dirs):
+ """Interface to support listing manageable snapshots and volumes"""
+ # Get all snapshots
+ vpsa_snapshots = self.vpsa._get_all_vpsa_snapshots()
+
+ # Get all snapshots of all volumes
+ all_vpsa_snapshots = []
+ for vpsa_snap in vpsa_snapshots:
+ if (vpsa_snap['pool_name'] ==
+ self.configuration.zadara_vpsa_poolname):
+ vpsa_snap['volume_name'] = vpsa_snap['volume_display_name']
+ vpsa_snap['size'] = float(vpsa_snap['volume_capacity_mb'] /
+ 1024)
+ all_vpsa_snapshots.append(vpsa_snap)
+
+ existing_snapshots = {}
+ for cinder_snapshot in cinder_snapshots:
+ if cinder_snapshot.provider_location:
+ snapshots = (list(filter(lambda snapshot:
+ ((snapshot['volume_ext_name'] ==
+ cinder_snapshot.volume.provider_location) and
+ (snapshot['name'] ==
+ cinder_snapshot.provider_location)),
+ all_vpsa_snapshots)))
+ else:
+ volume_name = (self._get_zadara_vol_template_name(
+ cinder_snapshot.volume_name))
+ snapshots = (list(filter(lambda snapshot:
+ ((snapshot['volume_display_name'] ==
+ volume_name) and
+ (snapshot['display_name'] ==
+ cinder_snapshot.name)),
+ all_vpsa_snapshots)))
+ for snapshot in snapshots:
+ existing_snapshots[snapshot['name']] = cinder_snapshot.id
+
+ manageable_snapshots = []
+ try:
+ unique_snapshots = []
+ for snapshot in all_vpsa_snapshots:
+ snap_id = snapshot['name']
+ if snap_id in unique_snapshots:
+ continue
+
+ cinder_id = existing_snapshots.get(snap_id)
+ is_safe = True
+ reason_not_safe = None
+
+ if cinder_id:
+ is_safe = False
+ reason_not_safe = _("Snapshot already managed.")
+
+ manageable_snapshots.append({
+ 'reference': {'name': snapshot['display_name']},
+ 'size': snapshot['size'],
+ 'safe_to_manage': is_safe,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_id,
+ 'extra_info': None,
+ 'source_reference': {'name': snapshot['volume_name']},
+ })
+
+ unique_snapshots.append(snap_id)
+ return volume_utils.paginate_entries_list(
+ manageable_snapshots, marker, limit, offset,
+ sort_keys, sort_dirs)
+ except Exception as e:
+ msg = (_('Exception: %s') % six.text_type(e))
+ LOG.error(msg)
+ raise
+
+    def manage_existing_snapshot(self, snapshot, existing_ref):
+        """Brings an existing backend storage object under Cinder management.
+
+        :param existing_ref: dict with a ``name`` key identifying the
+            backend snapshot by name within the source volume's CG.
+        :raises ManageExistingInvalidReference: if the snapshot's source
+            volume or the snapshot itself cannot be found.
+        """
+        self._validate_existing_ref(existing_ref)
+
+        snap_name = existing_ref['name']
+        volume = self._get_vpsa_volume(snapshot.volume, False)
+        if not volume:
+            msg = (_('Source volume of snapshot %s could not be found.'
+                     ' Invalid data') % snap_name)
+            LOG.error(msg)
+            raise cinder_exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=msg)
+
+        # Check if the snapshot exists
+        snap_id = self.vpsa._get_snap_id(volume['cg_name'], snap_name)
+        if not snap_id:
+            msg = (_('Snapshot %s could not be found. It might be'
+                     ' already deleted') % snap_name)
+            LOG.error(msg)
+            raise cinder_exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=msg)
+
+        new_name = snapshot.name
+        new_snap_id = self.vpsa._get_snap_id(volume['cg_name'], new_name)
+        if new_snap_id:
+            # NOTE(review): returns None (no provider_location update)
+            # when the target name already exists — confirm this silent
+            # success path is intended rather than an error.
+            msg = (_('Snapshot with name %s already exists') % new_name)
+            LOG.debug(msg)
+            return
+
+        data = self.vpsa_send_cmd('rename_snapshot',
+                                  snap_id=snap_id,
+                                  new_name=new_name)
+        return {'provider_location': data.get('snapshot_name')}
+
+    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
+        """Return size of snapshot to be managed by manage_existing"""
+        # We do not have any size field for a snapshot.
+        # We only have it on volumes. So, here just figure
+        # out the parent volume of this snapshot and return its size
+        self._validate_existing_ref(existing_ref)
+        snap_name = existing_ref['name']
+        # Non-raising lookup so we can report a reference error instead.
+        volume = self._get_vpsa_volume(snapshot.volume, False)
+        if not volume:
+            msg = (_('Source volume of snapshot %s could not be found.'
+                     ' Invalid data') % snap_name)
+            LOG.error(msg)
+            raise cinder_exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=msg)
+
+        # Verify the snapshot itself still exists in the volume's CG.
+        snap_id = self.vpsa._get_snap_id(volume['cg_name'], snap_name)
+        if not snap_id:
+            msg = (_('Snapshot %s could not be found. It might be '
+                     'already deleted') % snap_name)
+            LOG.error(msg)
+            raise cinder_exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=msg)
+
+        # Parent volume capacity (GiB) stands in for the snapshot size.
+        return volume['virtual_capacity']
+
+    def unmanage_snapshot(self, snapshot):
+        """Removes the specified snapshot from Cinder management"""
+        # Intentional no-op: the backend snapshot is left untouched.
+        pass
+
+    def initialize_connection(self, volume, connector):
+        """Attach volume to initiator/host.
+
+        During this call VPSA exposes volume to particular Initiator. It also
+        creates a 'server' entity for Initiator (if it was not created before)
+        All necessary connection information is returned, including auth data.
+        Connection data (target, LUN) is not stored in the DB.
+        """
+        # First: Check Active controller: if not valid, raise exception
+        ctrl = self.vpsa._get_active_controller_details()
+        if not ctrl:
+            raise zadara_exception.ZadaraVPSANoActiveController()
+
+        # Get/Create server name for IQN
+        initiator_name = connector['initiator']
+        vpsa_srv = self.vpsa._create_vpsa_server(iqn=initiator_name)
+        if not vpsa_srv:
+            raise zadara_exception.ZadaraServerCreateFailure(
+                name=initiator_name)
+
+        # Get volume
+        vpsa_volume = self._get_vpsa_volume(volume)
+        # Skip the attach command if this server is already attached
+        # (supports multiattach / re-initialization).
+        servers = self.vpsa._get_servers_attached_to_volume(vpsa_volume)
+        attach = None
+        for server in servers:
+            if server == vpsa_srv:
+                attach = server
+                break
+        # Attach volume to server
+        if attach is None:
+            self.vpsa_send_cmd('attach_volume',
+                               vpsa_srv=vpsa_srv,
+                               vpsa_vol=vpsa_volume['name'])
+
+        # Re-read attachments to obtain the target IQN and LUN assigned
+        # by the VPSA for this initiator.
+        data = self.vpsa_send_cmd('list_vol_attachments',
+                                  vpsa_vol=vpsa_volume['name'])
+        server = None
+        servers = data.get('servers', [])
+        for srv in servers:
+            if srv['iqn'] == initiator_name:
+                server = srv
+                break
+
+        if server is None:
+            vol_name = (self._get_zadara_vol_template_name(
+                        volume.name))
+            raise zadara_exception.ZadaraAttachmentsNotFound(
+                name=vol_name)
+
+        target = server['target']
+        lun = int(server['lun'])
+        if None in [target, lun]:
+            vol_name = (self._get_zadara_vol_template_name(
+                        volume.name))
+            raise zadara_exception.ZadaraInvalidAttachmentInfo(
+                name=vol_name,
+                reason=_('target=%(target)s, lun=%(lun)s') %
+                {'target': target, 'lun': lun})
+
+        ctrl_ip = self.vpsa._get_target_host(ctrl['ip'])
+        # CHAP credentials come from the active controller.
+        properties = {'target_discovered': False,
+                      'target_portal': (('%s:%s') % (ctrl_ip, '3260')),
+                      'target_iqn': target,
+                      'target_lun': lun,
+                      'volume_id': volume.id,
+                      'auth_method': 'CHAP',
+                      'auth_username': ctrl['chap_user'],
+                      'auth_password': ctrl['chap_passwd']}
+
+        # Mask the CHAP password before logging.
+        LOG.debug('Attach properties: %(properties)s',
+                  {'properties': strutils.mask_password(properties)})
+        return {'driver_volume_type':
+                ('iser' if (self.configuration.safe_get('zadara_use_iser'))
+                 else 'iscsi'), 'data': properties}
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Detach volume from the initiator.
+
+        With ``connector=None`` (force detach) the volume is detached
+        from every server. For multiattach volumes, only the last
+        attachment from a given host triggers an actual detach.
+        """
+        vpsa_volume = self._get_vpsa_volume(volume)
+
+        if connector is None:
+            # Detach volume from all servers
+            # Get volume name
+            self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume)
+            return
+
+        # Check if there are multiple attachments to the volume from the
+        # same host. Terminate connection only for the last attachment from
+        # the corresponding host.
+        count = 0
+        host = connector.get('host') if connector else None
+        if host and volume.get('multiattach'):
+            attach_list = volume.volume_attachment
+            for attachment in attach_list:
+                if (attachment['attach_status'] !=
+                        fields.VolumeAttachStatus.ATTACHED):
+                    continue
+                if attachment.attached_host == host:
+                    count += 1
+            if count > 1:
+                return
+
+        # Get server name for IQN
+        initiator_name = connector['initiator']
+
+        vpsa_srv = self.vpsa._get_server_name(initiator_name, False)
+        if not vpsa_srv:
+            raise zadara_exception.ZadaraServerNotFound(name=initiator_name)
+
+        # NOTE(review): _get_vpsa_volume above was called without the
+        # non-raising flag, so this check may be unreachable — confirm.
+        if not vpsa_volume:
+            raise cinder_exception.VolumeNotFound(volume_id=volume.id)
+
+        # Detach volume from server
+        self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume,
+                                      vpsa_srv=vpsa_srv)
+
+    def _update_volume_stats(self):
+        """Retrieve stats info from volume group."""
+        LOG.debug("Updating volume stats")
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        storage_protocol = ('iSER' if
+                            (self.configuration.safe_get('zadara_use_iser'))
+                            else 'iSCSI')
+        pool_name = self.configuration.zadara_vpsa_poolname
+        # NOTE(review): 'provisioned' is unpacked but never reported —
+        # consider exposing provisioned_capacity_gb to the scheduler.
+        (total, free, provisioned) = self.vpsa._get_pool_capacity(pool_name)
+        data = dict(
+            volume_backend_name=backend_name or self.__class__.__name__,
+            vendor_name='Zadara Storage',
+            driver_version=self.VERSION,
+            storage_protocol=storage_protocol,
+            reserved_percentage=self.configuration.reserved_percentage,
+            QoS_support=False,
+            multiattach=True,
+            total_capacity_gb=total,
+            free_capacity_gb=free
+        )
+
+        self._stats = data
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index a5248a50336..c85190f1a63 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -180,6 +180,8 @@ MAPPING = {
'FJDXISCSIDriver',
'cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver':
'cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver',
+ 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver':
+ 'cinder.volume.drivers.zadara.zadara.ZadaraVPSAISCSIDriver',
}
diff --git a/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst b/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst
index fce6c9a832b..cb53023adfc 100644
--- a/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst
@@ -30,6 +30,9 @@ Supported operations
- Clone a volume
- Extend a volume
- Migrate a volume with back end assistance
+- Manage and unmanage a volume
+- Manage and unmanage volume snapshots
+- Multiattach a volume
Configuration
~~~~~~~~~~~~~
@@ -64,7 +67,7 @@ Sample minimum back end configuration
zadara_password = mysecretpassword
zadara_use_iser = false
zadara_vpsa_poolname = pool-00000001
- volume_driver = cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver
+ volume_driver = cinder.volume.drivers.zadara.zadara.ZadaraVPSAISCSIDriver
volume_backend_name = vpsa
Driver-specific options
@@ -76,7 +79,8 @@ to the Zadara Storage VPSA driver.
.. config-table::
:config-target: Zadara
- cinder.volume.drivers.zadara
+ cinder.volume.drivers.zadara.common
+ cinder.volume.drivers.zadara.zadara
.. note::
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index 983c9b8de09..b717a8368ab 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -844,7 +844,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
-driver.zadara=missing
+driver.zadara=complete
[operation.revert_to_snapshot_assisted]
title=Revert to Snapshot
diff --git a/releasenotes/notes/Zadara-newlayout-support-features-ffa20694c008ba86.yaml b/releasenotes/notes/Zadara-newlayout-support-features-ffa20694c008ba86.yaml
new file mode 100644
index 00000000000..69ea80892ec
--- /dev/null
+++ b/releasenotes/notes/Zadara-newlayout-support-features-ffa20694c008ba86.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+    Zadara VPSA Driver: Added support for volume manage and unmanage,
+    snapshot manage and unmanage, listing manageable volumes and
+    snapshots, multiattach, and IPv6.
+
+upgrade:
+ - |
+    The Zadara VPSA Driver has been updated to use the JSON API format
+    and reorganized into a new code layout. The ``volume_driver`` path
+    ``cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver`` must be
+    updated to ``cinder.volume.drivers.zadara.zadara.ZadaraVPSAISCSIDriver``
+    in ``cinder.conf``.