Implement share backup for the NetApp driver
Implement the share backup feature for the NetApp driver. NetApp SnapVault technology is used to create and restore backups of NetApp ONTAP shares. The backup delete workflow simply deletes the transferred snapshot from the destination backup volume.

Depends-On: Ifb88ec096674ea8bc010c1c3f6dea1b51be3beaa
Change-Id: I5a4edbf547e7886fb4fa9c1bed90110a33f9bf3b
parent a230ea511e
commit dfbf51bafd
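For orientation, a minimal and purely illustrative manila.conf sketch of how the new options fit together; the section names 'ontap_backend' and 'eng_backup' are assumptions, not part of this change:

    [ontap_backend]
    # NetApp backend that owns the source shares; it advertises which
    # backup types it accepts.
    netapp_storage_family = ontap_cluster
    netapp_enabled_backup_types = eng_backup

    [eng_backup]
    # backup_type stanza read by data_motion.get_backup_configuration().
    netapp_backup_backend_section_name = ontap_backend
    netapp_backup_vserver = backup_vserver1
    netapp_backup_share = backup_volume1
    netapp_snapmirror_job_timeout = 1800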
@@ -175,6 +175,7 @@ _global_opt_lists = [
    manila.share.drivers.netapp.options.netapp_basicauth_opts,
    manila.share.drivers.netapp.options.netapp_provisioning_opts,
    manila.share.drivers.netapp.options.netapp_data_motion_opts,
    manila.share.drivers.netapp.options.netapp_backup_opts,
    manila.share.drivers.nexenta.options.nexenta_connection_opts,
    manila.share.drivers.nexenta.options.nexenta_dataset_opts,
    manila.share.drivers.nexenta.options.nexenta_nfs_opts,
@@ -2899,6 +2899,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
                },
                'volume-space-attributes': {
                    'size': None,
                    'size-used': None,
                },
            },
        },
@@ -2946,6 +2947,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
            'type': volume_id_attributes.get_child_content('type'),
            'style': volume_id_attributes.get_child_content('style'),
            'size': volume_space_attributes.get_child_content('size'),
            'size-used': volume_space_attributes.get_child_content(
                'size-used'),
            'qos-policy-group-name': volume_qos_attributes.get_child_content(
                'policy-group-name'),
            'style-extended': volume_id_attributes.get_child_content(
@@ -3333,9 +3336,12 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
        self.send_request('volume-destroy', {'name': volume_name})

    @na_utils.trace
    def create_snapshot(self, volume_name, snapshot_name):
    def create_snapshot(self, volume_name, snapshot_name,
                        snapmirror_label=None):
        """Creates a volume snapshot."""
        api_args = {'volume': volume_name, 'snapshot': snapshot_name}
        if snapmirror_label is not None:
            api_args['snapmirror-label'] = snapmirror_label
        self.send_request('snapshot-create', api_args)

    @na_utils.trace
@@ -3345,7 +3351,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
                  'volume %(volume)s',
                  {'snapshot': snapshot_name, 'volume': volume_name})

        """Gets a single snapshot."""
        # Gets a single snapshot.
        api_args = {
            'query': {
                'snapshot-info': {
@@ -5016,15 +5022,20 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
                for snapshot_info in attributes_list.get_children()]

    @na_utils.trace
    def create_snapmirror_policy(self, policy_name, type='async_mirror',
    def create_snapmirror_policy(self, policy_name,
                                 policy_type='async_mirror',
                                 discard_network_info=True,
                                 preserve_snapshots=True):
                                 preserve_snapshots=True,
                                 snapmirror_label='all_source_snapshots',
                                 keep=1
                                 ):
        """Creates a SnapMirror policy for a vServer."""

        self._ensure_snapmirror_v2()

        api_args = {
            'policy-name': policy_name,
            'type': type,
            'type': policy_type,
        }

        if discard_network_info:
@@ -5037,8 +5048,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
        if preserve_snapshots:
            api_args = {
                'policy-name': policy_name,
                'snapmirror-label': 'all_source_snapshots',
                'keep': '1',
                'snapmirror-label': snapmirror_label,
                'keep': keep,
                'preserve': 'false'
            }

@@ -6239,3 +6250,41 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):

        # Convert Bytes to GBs.
        return (total_volumes_size / 1024**3)

    @na_utils.trace
    def snapmirror_restore_vol(self, source_path=None, dest_path=None,
                               source_vserver=None, dest_vserver=None,
                               source_volume=None, dest_volume=None,
                               source_snapshot=None):
        """Restore snapshot copy from destination volume to source volume"""
        self._ensure_snapmirror_v2()

        api_args = self._build_snapmirror_request(
            source_path, dest_path, source_vserver,
            dest_vserver, source_volume, dest_volume)
        if source_snapshot:
            api_args["source-snapshot"] = source_snapshot
        self.send_request('snapmirror-restore', api_args)

    @na_utils.trace
    def list_volume_snapshots(self, volume_name, snapmirror_label=None,
                              newer_than=None):
        """Gets SnapMirror snapshots on a volume."""
        api_args = {
            'query': {
                'snapshot-info': {
                    'volume': volume_name,
                },
            },
        }
        if newer_than:
            api_args['query']['snapshot-info'][
                'access-time'] = '>' + newer_than
        if snapmirror_label:
            api_args['query']['snapshot-info'][
                'snapmirror-label'] = snapmirror_label
        result = self.send_iter_request('snapshot-get-iter', api_args)
        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [snapshot_info.get_child_content('name')
                for snapshot_info in attributes_list.get_children()]
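A brief usage sketch (an assumption, not part of the diff) of how a caller ties the new snapmirror_label argument to the SnapVault policy retention rule; 'client' and the volume names are placeholders:

    # The library code below creates the vault policy with
    # snapmirror_label='backup' and keep=250, so only snapshots tagged with
    # that label are transferred and retained on the destination.
    client.create_snapshot('share_volume', 'backup_<share_id>_<backup_id>',
                           snapmirror_label='backup')
    client.list_volume_snapshots('backup_volume', snapmirror_label='backup')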
@@ -857,7 +857,7 @@ class NetAppRestClient(object):
        query = {
            'name': volume_name,
            'fields': 'aggregates.name,nas.path,name,svm.name,type,style,'
                      'qos.policy.name,space.size'
                      'qos.policy.name,space.size,space.used'
        }

        result = self.send_request('/storage/volumes', 'get', query=query)
@@ -888,6 +888,7 @@ class NetAppRestClient(object):
            'type': volume_infos.get('type'),
            'style': volume_infos.get('style'),
            'size': volume_infos.get('space', {}).get('size'),
            'size-used': volume_infos.get('space', {}).get('used'),
            'qos-policy-group-name': (
                volume_infos.get('qos', {}).get('policy', {}).get('name')),
            'style-extended': volume_infos.get('style')
@@ -1833,7 +1834,8 @@ class NetAppRestClient(object):
                          'patch', body=body)

    @na_utils.trace
    def create_snapshot(self, volume_name, snapshot_name):
    def create_snapshot(self, volume_name, snapshot_name,
                        snapmirror_label=None):
        """Creates a volume snapshot."""

        volume = self._get_volume_by_args(vol_name=volume_name)
@@ -1841,6 +1843,8 @@ class NetAppRestClient(object):
        body = {
            'name': snapshot_name,
        }
        if snapmirror_label is not None:
            body['snapmirror_label'] = snapmirror_label
        self.send_request(f'/storage/volumes/{uuid}/snapshots', 'post',
                          body=body)

@@ -2323,7 +2327,10 @@ class NetAppRestClient(object):
                             else record.get('state')),
            'transferring-state': record.get('transfer', {}).get('state'),
            'mirror-state': record.get('state'),
            'schedule': record['transfer_schedule']['name'],
            'schedule': (
                record['transfer_schedule']['name']
                if record.get('transfer_schedule')
                else None),
            'source-vserver': record['source']['svm']['name'],
            'source-volume': (record['source']['path'].split(':')[1] if
                              record.get('source') else None),
@@ -4930,6 +4937,31 @@ class NetAppRestClient(object):
                policy_name.append(record.get('name'))
        return policy_name

    @na_utils.trace
    def create_snapmirror_policy(self, policy_name,
                                 policy_type='async',
                                 discard_network_info=True,
                                 preserve_snapshots=True,
                                 snapmirror_label='all_source_snapshots',
                                 keep=1):
        """Create SnapMirror Policy"""

        if policy_type == "vault":
            body = {"name": policy_name, "type": "async",
                    "create_snapshot_on_source": False}
        else:
            body = {"name": policy_name, "type": policy_type}
        if discard_network_info:
            body["exclude_network_config"] = {'svmdr-config-obj': 'network'}
        if preserve_snapshots:
            body["retention"] = [{"label": snapmirror_label, "count": keep}]
        try:
            self.send_request('/snapmirror/policies/', 'post', body=body)
        except netapp_api.api.NaApiError as e:
            LOG.debug('Failed to create SnapMirror policy. '
                      'Error: %s. Code: %s', e.message, e.code)
            raise

    @na_utils.trace
    def delete_snapmirror_policy(self, policy_name):
        """Deletes a SnapMirror policy."""
@@ -5362,3 +5394,56 @@ class NetAppRestClient(object):

        # Convert Bytes to GBs.
        return (total_volumes_size / 1024**3)

    def snapmirror_restore_vol(self, source_path=None, dest_path=None,
                               source_vserver=None, dest_vserver=None,
                               source_volume=None, dest_volume=None,
                               source_snapshot=None):
        """Restore snapshot copy from destination volume to source volume"""
        snapmirror_info = self.get_snapmirror_destinations(dest_path,
                                                           source_path,
                                                           dest_vserver,
                                                           source_vserver,
                                                           dest_volume,
                                                           source_volume,
                                                           )
        if not snapmirror_info:
            msg = _("There is no relationship between source "
                    "'%(source_path)s' and destination cluster"
                    " '%(des_path)s'")
            msg_args = {'source_path': source_path,
                        'des_path': dest_path,
                        }
            raise exception.NetAppException(msg % msg_args)
        uuid = snapmirror_info[0].get('uuid')
        body = {"destination": {"path": dest_path},
                "source_snapshot": source_snapshot}
        try:
            self.send_request(f"/snapmirror/relationships/{uuid}/restore",
                              'post', body=body)
        except netapp_api.api.NaApiError as e:
            LOG.debug('Snapmirror restore has failed. Error: %s. Code: %s',
                      e.message, e.code)
            raise

    @na_utils.trace
    def list_volume_snapshots(self, volume_name, snapmirror_label=None,
                              newer_than=None):
        """Gets list of snapshots of volume."""
        volume = self._get_volume_by_args(vol_name=volume_name)
        uuid = volume['uuid']
        query = {}
        if snapmirror_label:
            query = {
                'snapmirror_label': snapmirror_label,
            }

        if newer_than:
            query['create_time'] = '>' + newer_than

        response = self.send_request(
            f'/storage/volumes/{uuid}/snapshots/',
            'get', query=query)

        return [snapshot_info['name']
                for snapshot_info in response['records']]
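For reference, a rough sketch (inferred from the calls above, not an authoritative API listing) of the two REST bodies this client ends up posting for a vault-style backup:

    # POST /snapmirror/policies  (create_snapmirror_policy with policy_type="vault")
    policy_body = {
        "name": "os_backup_policy_<share_instance_id>",
        "type": "async",
        "create_snapshot_on_source": False,
        "retention": [{"label": "backup", "count": 250}],
    }
    # POST /snapmirror/relationships/<uuid>/restore  (snapmirror_restore_vol)
    restore_body = {
        "destination": {"path": "src_vserver:src_volume"},
        "source_snapshot": "backup_<share_id>_<backup_id>",
    }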
@@ -67,10 +67,30 @@ def get_backend_configuration(backend_name):
    config.append_config_values(na_opts.netapp_support_opts)
    config.append_config_values(na_opts.netapp_provisioning_opts)
    config.append_config_values(na_opts.netapp_data_motion_opts)
    config.append_config_values(na_opts.netapp_proxy_opts)
    config.append_config_values(na_opts.netapp_backup_opts)

    return config


def get_backup_configuration(backup_name):
    config_stanzas = CONF.list_all_sections()
    if backup_name not in config_stanzas:
        msg = _("Could not find backend stanza %(backup_name)s in "
                "configuration which is required for backup workflows "
                "with the source share. Available stanzas are "
                "%(stanzas)s")
        params = {
            "stanzas": config_stanzas,
            "backup_name": backup_name,
        }
        raise exception.BadConfigurationException(reason=msg % params)
    config = configuration.Configuration(driver.share_opts,
                                         config_group=backup_name)
    config.append_config_values(na_opts.netapp_backup_opts)
    return config
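# Illustrative usage sketch (not part of this change): assuming a hypothetical
# [eng_backup] stanza exists in manila.conf, the backup workflow resolves its
# destination backend roughly as:
#     backup_config = get_backup_configuration('eng_backup')
#     backend_name = backup_config.safe_get('netapp_backup_backend_section_name')
#     backend_config = get_backend_configuration(backend_name)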

def get_client_for_backend(backend_name, vserver_name=None):
    config = get_backend_configuration(backend_name)
    if config.netapp_use_legacy_client:
@@ -913,3 +933,58 @@ class DataMotionSession(object):
            LOG.exception(
                'Error releasing snapmirror destination %s for '
                'replica %s.', destination['id'], replica['id'])

    def get_most_available_aggr_of_vserver(self, vserver_client):
        """Get most available aggregate"""
        aggrs_space_attr = vserver_client.get_vserver_aggregate_capacities()
        if not aggrs_space_attr:
            return None
        aggr_list = list(aggrs_space_attr.keys())
        most_available_aggr = aggr_list[0]
        for aggr in aggr_list:
            if (aggrs_space_attr.get(aggr).get('available')
                    > aggrs_space_attr.get(
                        most_available_aggr).get('available')):
                most_available_aggr = aggr
        return most_available_aggr
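    # A tiny illustration of the selection above, with made-up capacities:
    # if get_vserver_aggregate_capacities() returned
    #     {'aggr_a': {'available': 10 * 1024 ** 3},
    #      'aggr_b': {'available': 50 * 1024 ** 3}}
    # the loop keeps the key with the largest 'available' value, so a backup
    # volume would be placed on 'aggr_b'.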

    def initialize_and_wait_snapmirror_vol(self, vserver_client,
                                           source_vserver, source_volume,
                                           dest_vserver, dest_volume,
                                           source_snapshot=None,
                                           transfer_priority=None,
                                           timeout=300):
        """Initialize and wait for SnapMirror relationship"""
        interval = 10
        retries = (timeout / interval or 1)
        vserver_client.initialize_snapmirror_vol(
            source_vserver,
            source_volume,
            dest_vserver,
            dest_volume,
            source_snapshot=source_snapshot,
            transfer_priority=transfer_priority,
        )

        @utils.retry(exception.NetAppException, interval=interval,
                     retries=retries, backoff_rate=1)
        def wait_for_initialization():
            source_path = f"{source_vserver}:{source_volume}"
            des_path = f"{dest_vserver}:{dest_volume}"
            snapmirror_info = vserver_client.get_snapmirrors(
                source_path=source_path, dest_path=des_path)
            relationship_status = snapmirror_info[0].get("relationship-status")
            if relationship_status == "idle":
                return
            else:
                msg = (_('Snapmirror relationship status is: %s. Waiting '
                         'until it has been initialized.') %
                       relationship_status)
                raise exception.NetAppException(message=msg)

        try:
            wait_for_initialization()
        except exception.NetAppException:
            msg = _("Timed out while waiting for the SnapMirror relationship "
                    "to be initialized")
            raise exception.NetAppException(message=msg)
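A quick note on the polling cadence used above, taken from the default values:

    interval = 10                        # seconds between get_snapmirrors() polls
    timeout = 300                        # default overall wait
    retries = timeout / interval or 1    # 30 attempts, fixed spacing (backoff_rate=1)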
@@ -380,3 +380,21 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
        return self.library.update_share_server_network_allocations(
            context, share_server, current_network_allocations,
            new_network_allocations, security_services, shares, snapshots)

    def create_backup(self, context, share, backup, **kwargs):
        return self.library.create_backup(context, share, backup, **kwargs)

    def create_backup_continue(self, context, share, backup, **kwargs):
        return self.library.create_backup_continue(context, share, backup,
                                                   **kwargs)

    def restore_backup(self, context, backup, share, **kwargs):
        return self.library.restore_backup(context, backup, share,
                                           **kwargs)

    def restore_backup_continue(self, context, backup, share, **kwargs):
        return self.library.restore_backup_continue(context, backup, share,
                                                     **kwargs)

    def delete_backup(self, context, backup, share, **kwargs):
        return self.library.delete_backup(context, backup, share, **kwargs)
@@ -345,3 +345,20 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
            self, context, share_server, current_network_allocations,
            new_network_allocations, security_services, shares, snapshots):
        raise NotImplementedError

    def create_backup(self, context, share, backup, **kwargs):
        return self.library.create_backup(context, share, backup, **kwargs)

    def create_backup_continue(self, context, share, backup, **kwargs):
        return self.library.create_backup_continue(context, share, backup,
                                                    **kwargs)

    def restore_backup(self, context, backup, share, **kwargs):
        return self.library.restore_backup(context, backup, share, **kwargs)

    def restore_backup_continue(self, context, backup, share, **kwargs):
        return self.library.restore_backup_continue(context, backup, share,
                                                     **kwargs)

    def delete_backup(self, context, backup, share, **kwargs):
        return self.library.delete_backup(context, backup, share, **kwargs)
@@ -21,11 +21,13 @@ single-SVM or multi-SVM functionality needed by the cDOT Manila drivers.

import copy
import datetime
from enum import Enum
import json
import math
import re
import socket

from manila.exception import SnapshotResourceNotFound
from oslo_config import cfg
from oslo_log import log
from oslo_service import loopingcall
@@ -57,8 +59,22 @@ LOG = log.getLogger(__name__)
CONF = cfg.CONF


class NetAppCmodeFileStorageLibrary(object):
class Backup(Enum):
    """Enum for share backup"""
    BACKUP_TYPE = "backup_type"
    BACKEND_NAME = "netapp_backup_backend_section_name"
    DES_VSERVER = "netapp_backup_vserver"
    DES_VOLUME = "netapp_backup_share"
    SM_LABEL = "backup"
    DES_VSERVER_PREFIX = "backup_vserver"
    DES_VOLUME_PREFIX = "backup_volume"
    VOLUME_TYPE = "dp"
    SM_POLICY = "os_backup_policy"
    TOTAL_PROGRESS_HUNDRED = "100"
    TOTAL_PROGRESS_ZERO = "0"
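    # Naming sketch (placeholders, not part of the change): for a share
    # instance id <sid> and backup id <bid>, the library below derives
    #     SnapMirror policy:  f"{SM_POLICY.value}_<sid>"       -> os_backup_policy_<sid>
    #     backup snapshot:    f"{SM_LABEL.value}_<sid>_<bid>"  -> backup_<sid>_<bid>
    #     backup volume:      "backup_volume_<sid with '-' replaced by '_'>"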


class NetAppCmodeFileStorageLibrary(object):
    AUTOSUPPORT_INTERVAL_SECONDS = 3600  # hourly
    SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly
    HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes
@@ -157,6 +173,7 @@ class NetAppCmodeFileStorageLibrary(object):
        self._licenses = []
        self._client = None
        self._clients = {}
        self._backend_clients = {}
        self._ssc_stats = {}
        self._have_cluster_creds = None
        self._revert_to_snapshot_support = False
@@ -177,6 +194,7 @@ class NetAppCmodeFileStorageLibrary(object):
        self._snapmirror_schedule = self._convert_schedule_to_seconds(
            schedule=self.configuration.netapp_snapmirror_schedule)
        self._cluster_name = self.configuration.netapp_cluster_name
        self.is_volume_backup_before = False

    @na_utils.trace
    def do_setup(self, context):
@@ -218,39 +236,51 @@ class NetAppCmodeFileStorageLibrary(object):
    def _get_vserver(self, share_server=None):
        raise NotImplementedError()

    def _get_client(self, config, vserver=None):
        if config.netapp_use_legacy_client:
            client = client_cmode.NetAppCmodeClient(
                transport_type=config.netapp_transport_type,
                ssl_cert_path=config.netapp_ssl_cert_path,
                username=config.netapp_login,
                password=config.netapp_password,
                hostname=config.netapp_server_hostname,
                port=config.netapp_server_port,
                vserver=vserver,
                trace=na_utils.TRACE_API,
                api_trace_pattern=na_utils.API_TRACE_PATTERN)
        else:
            client = client_cmode_rest.NetAppRestClient(
                transport_type=config.netapp_transport_type,
                ssl_cert_path=config.netapp_ssl_cert_path,
                username=config.netapp_login,
                password=config.netapp_password,
                hostname=config.netapp_server_hostname,
                port=config.netapp_server_port,
                vserver=vserver,
                trace=na_utils.TRACE_API,
                async_rest_timeout=(
                    config.netapp_rest_operation_timeout),
                api_trace_pattern=na_utils.API_TRACE_PATTERN)
        return client

    @na_utils.trace
    def _get_api_client(self, vserver=None):

        # Use cached value to prevent redo calls during client initialization.
        client = self._clients.get(vserver)

        if not client:
            if self.configuration.netapp_use_legacy_client:
                client = client_cmode.NetAppCmodeClient(
                    transport_type=self.configuration.netapp_transport_type,
                    ssl_cert_path=self.configuration.netapp_ssl_cert_path,
                    username=self.configuration.netapp_login,
                    password=self.configuration.netapp_password,
                    hostname=self.configuration.netapp_server_hostname,
                    port=self.configuration.netapp_server_port,
                    vserver=vserver,
                    trace=na_utils.TRACE_API,
                    api_trace_pattern=na_utils.API_TRACE_PATTERN)
            else:
                client = client_cmode_rest.NetAppRestClient(
                    transport_type=self.configuration.netapp_transport_type,
                    ssl_cert_path=self.configuration.netapp_ssl_cert_path,
                    username=self.configuration.netapp_login,
                    password=self.configuration.netapp_password,
                    hostname=self.configuration.netapp_server_hostname,
                    port=self.configuration.netapp_server_port,
                    vserver=vserver,
                    trace=na_utils.TRACE_API,
                    async_rest_timeout=(
                        self.configuration.netapp_rest_operation_timeout),
                    api_trace_pattern=na_utils.API_TRACE_PATTERN)
            client = self._get_client(self.configuration, vserver=vserver)
            self._clients[vserver] = client
        return client

    @na_utils.trace
    def _get_api_client_for_backend(self, backend_name, vserver=None):
        key = f"{backend_name}-{vserver}"
        client = self._backend_clients.get(key)
        if not client:
            config = data_motion.get_backend_configuration(backend_name)
            client = self._get_client(config, vserver=vserver)
            self._backend_clients[key] = client
        return client

    @na_utils.trace
@@ -648,6 +678,14 @@ class NetAppCmodeFileStorageLibrary(object):
        """Find all aggregates match pattern."""
        raise NotImplementedError()

    def _get_backup_vserver(self, backup, share_server=None):
        """Get/Create the vserver for backup"""
        raise NotImplementedError()

    def _delete_backup_vserver(self, backup, des_vserver):
        """Delete the vserver for backup"""
        raise NotImplementedError()

    @na_utils.trace
    def _get_flexgroup_aggr_set(self):
        aggr = set()
@@ -4281,3 +4319,639 @@ class NetAppCmodeFileStorageLibrary(object):

        pool_name = share_utils.extract_host(host, level='pool')
        return pool_name in pools

    @na_utils.trace
    def create_backup(self, context, share_instance, backup,
                      share_server=None):
        """Create a backup for a NetApp share"""

        src_vserver, src_vserver_client = self._get_vserver(
            share_server=share_server)
        src_cluster = src_vserver_client.get_cluster_name()
        src_vol = self._get_backend_share_name(share_instance['id'])
        backup_options = backup.get('backup_options', {})
        backup_type = backup_options.get(Backup.BACKUP_TYPE.value)

        # Check if a valid backup type is provided
        if not backup_type:
            raise exception.BackupException("Driver needs a valid backup type"
                                            " from command line or API.")

        # Check that the backend is a NetApp backend
        backup_config = data_motion.get_backup_configuration(backup_type)
        backend_name = backup_config.safe_get(Backup.BACKEND_NAME.value)
        backend_config = data_motion.get_backend_configuration(
            backend_name)
        if (backend_config.safe_get("netapp_storage_family")
                != 'ontap_cluster'):
            err_msg = _("Wrong vendor backend %s is provided; provide"
                        " only a NetApp backend.") % backend_name
            raise exception.BackupException(err_msg)

        # Check that the backend has a compatible backup type
        if (backend_config.safe_get("netapp_enabled_backup_types") is None or
                backup_type not in backend_config.safe_get(
                    "netapp_enabled_backup_types")):
            err_msg = _("Backup type '%(backup_type)s' is not compatible with"
                        " backend '%(backend_name)s'.")
            msg_args = {
                'backup_type': backup_type,
                'backend_name': backend_name,
            }
            raise exception.BackupException(err_msg % msg_args)

        # Verify that the source and destination clusters are peered
        des_cluster_api_client = self._get_api_client_for_backend(
            backend_name)
        des_cluster = des_cluster_api_client.get_cluster_name()
        if src_cluster != des_cluster:
            cluster_peer_info = self._client.get_cluster_peers(
                remote_cluster_name=des_cluster)
            if not cluster_peer_info:
                err_msg = _("Source cluster '%(src_cluster)s' and destination"
                            " cluster '%(des_cluster)s' are not peered for"
                            " backend %(backend_name)s.")
                msg_args = {
                    'src_cluster': src_cluster,
                    'des_cluster': des_cluster,
                    'backend_name': backend_name
                }
                raise exception.NetAppException(err_msg % msg_args)

        # Get the destination vserver and volume for the relationship
        source_path = f"{src_vserver}:{src_vol}"
        snapmirror_info = src_vserver_client.get_snapmirror_destinations(
            source_path=source_path)
        if len(snapmirror_info) > 1:
            err_msg = _("Source path %(path)s has more than one relationship."
                        " To create the share backup, delete all the source"
                        " volume's SnapMirror relationships using the"
                        " 'snapmirror' ONTAP CLI or System Manager.")
            msg_args = {
                'path': source_path
            }
            raise exception.NetAppException(err_msg % msg_args)
        elif len(snapmirror_info) == 1:
            des_vserver, des_volume = self._get_destination_vserver_and_vol(
                src_vserver_client, source_path, False)
            des_vserver_client = self._get_api_client_for_backend(
                backend_name, vserver=des_vserver)
        else:
            if (backup_config.safe_get(Backup.DES_VOLUME.value) and
                    not backup_config.safe_get(Backup.DES_VSERVER.value)):
                msg = _("Could not find vserver name under stanza"
                        " '%(backup_type)s' in configuration while volume"
                        " name is provided.")
                params = {"backup_type": backup_type}
                raise exception.BadConfigurationException(reason=msg % params)

            des_vserver = self._get_vserver_for_backup(
                backup, share_server=share_server)
            des_vserver_client = self._get_api_client_for_backend(
                backend_name, vserver=des_vserver)
            try:
                des_volume = self._get_volume_for_backup(backup,
                                                         share_instance,
                                                         src_vserver_client,
                                                         des_vserver_client)
            except (netapp_api.NaApiError, exception.NetAppException):
                # Delete the vserver
                if share_server:
                    self._delete_backup_vserver(backup, des_vserver)

                msg = _("Failed to create a volume in vserver %(des_vserver)s")
                msg_args = {'des_vserver': des_vserver}
                raise exception.NetAppException(msg % msg_args)

        if (src_vserver != des_vserver and
                len(src_vserver_client.get_vserver_peers(
                    src_vserver, des_vserver)) == 0):
            src_vserver_client.create_vserver_peer(
                src_vserver, des_vserver,
                peer_cluster_name=des_cluster)
            if des_cluster is not None and src_cluster != des_cluster:
                des_vserver_client.accept_vserver_peer(des_vserver,
                                                       src_vserver)
        des_snapshot_list = (des_vserver_client.
                             list_volume_snapshots(des_volume))
        snap_list_with_backup = [
            snap for snap in des_snapshot_list if snap.startswith(
                Backup.SM_LABEL.value)
        ]
        if len(snap_list_with_backup) == 1:
            self.is_volume_backup_before = True

        policy_name = f"{Backup.SM_POLICY.value}_{share_instance['id']}"
        try:
            des_vserver_client.create_snapmirror_policy(
                policy_name,
                policy_type="vault",
                discard_network_info=False,
                snapmirror_label=Backup.SM_LABEL.value,
                keep=250)
        except netapp_api.NaApiError as e:
            with excutils.save_and_reraise_exception() as exc_context:
                if 'policy with this name already exists' in e.message:
                    exc_context.reraise = False
        try:
            des_vserver_client.create_snapmirror_vol(
                src_vserver,
                src_vol,
                des_vserver,
                des_volume,
                "extended_data_protection",
                policy=policy_name,
            )
            db_session = data_motion.DataMotionSession()
            db_session.initialize_and_wait_snapmirror_vol(
                des_vserver_client,
                src_vserver,
                src_vol,
                des_vserver,
                des_volume,
                timeout=backup_config.netapp_snapmirror_job_timeout
            )
        except netapp_api.NaApiError:
            self._resource_cleanup_for_backup(backup,
                                              share_instance,
                                              des_vserver,
                                              des_volume,
                                              share_server=share_server)
            msg = _("SnapVault relationship creation or initialization"
                    " failed between source %(source_vserver)s:"
                    "%(source_volume)s and destination %(des_vserver)s:"
                    "%(des_volume)s for share id %(share_id)s.")

            msg_args = {
                'source_vserver': src_vserver,
                'source_volume': src_vol,
                'des_vserver': des_vserver,
                'des_volume': des_volume,
                'share_id': share_instance['share_id']
            }
            raise exception.NetAppException(msg % msg_args)

        snapshot_name = self._get_backup_snapshot_name(backup,
                                                       share_instance['id'])
        src_vserver_client.create_snapshot(
            src_vol, snapshot_name,
            snapmirror_label=Backup.SM_LABEL.value)

        # Update the SnapMirror relationship
        des_vserver_client.update_snapmirror_vol(src_vserver,
                                                 src_vol,
                                                 des_vserver,
                                                 des_volume)
        LOG.debug("SnapMirror relationship updated successfully.")

    @na_utils.trace
    def create_backup_continue(self, context, share_instance, backup,
                               share_server=None):
        """Keep tracking the status of the share backup"""

        progress_status = {'total_progress': Backup.TOTAL_PROGRESS_ZERO.value}
        src_vserver, src_vserver_client = self._get_vserver(
            share_server=share_server)
        src_vol_name = self._get_backend_share_name(share_instance['id'])
        backend_name = self._get_backend(backup)
        source_path = f"{src_vserver}:{src_vol_name}"
        LOG.debug("SnapMirror source path: %s", source_path)
        backup_type = backup.get(Backup.BACKUP_TYPE.value)
        backup_config = data_motion.get_backup_configuration(backup_type)

        # Make sure the SnapMirror relationship has been created
        snapmirror_info = src_vserver_client.get_snapmirror_destinations(
            source_path=source_path,
        )
        if not snapmirror_info:
            LOG.warning("There is no SnapMirror relationship available for"
                        " source path %s yet.", source_path)
            return progress_status

        des_vserver, des_vol = self._get_destination_vserver_and_vol(
            src_vserver_client,
            source_path,
        )
        if not des_vserver or not des_vol:
            raise exception.NetAppException("Unable to find the vserver"
                                            " and volume from the SnapMirror"
                                            " relationship.")
        des_path = f"{des_vserver}:{des_vol}"
        LOG.debug("SnapMirror destination path: %s", des_path)

        des_vserver_client = self._get_api_client_for_backend(
            backend_name,
            vserver=des_vserver,
        )
        snapmirror_info = des_vserver_client.get_snapmirrors(
            source_path=source_path, dest_path=des_path)
        if not snapmirror_info:
            msg_args = {
                'source_path': source_path,
                'des_path': des_path,
            }
            msg = _("There is no SnapMirror relationship available for"
                    " source path '%(source_path)s' and destination path"
                    " '%(des_path)s' yet.") % msg_args
            LOG.warning(msg)
            return progress_status
        LOG.debug("SnapMirror details: %s", snapmirror_info)
        progress_status["total_progress"] = (Backup.
                                             TOTAL_PROGRESS_HUNDRED.value)
        if snapmirror_info[0].get("last-transfer-type") != "update":
            progress_status["total_progress"] = (Backup.
                                                 TOTAL_PROGRESS_ZERO.value)
            return progress_status

        if snapmirror_info[0].get("relationship-status") != "idle":
            progress_status = self._get_backup_progress_status(
                des_vserver_client, snapmirror_info)
            LOG.debug("Progress status: %(progress_status)s",
                      {'progress_status': progress_status})
            return progress_status

        # Verify that the snapshot has been transferred to the destination
        # volume
        snap_name = self._get_backup_snapshot_name(backup,
                                                   share_instance['id'])
        self._verify_and_wait_for_snapshot_to_transfer(des_vserver_client,
                                                       des_vol,
                                                       snap_name)
        LOG.debug("Snapshot '%(snap_name)s' transferred successfully to"
                  " destination", {'snap_name': snap_name})
        # If the volume was previously part of a relationship and all backups
        # of the share were deleted, the last snapshot is left behind on the
        # destination volume and cannot be deleted because of an ONTAP
        # restriction. The next time the first backup is created, the
        # destination volume is updated with the latest backup and that last
        # leftover snapshot is deleted.
        is_backup_completed = (progress_status["total_progress"]
                               == Backup.TOTAL_PROGRESS_HUNDRED.value)
        if backup_config.get(Backup.DES_VOLUME.value) and is_backup_completed:
            snap_list_with_backup = self._get_des_volume_backup_snapshots(
                des_vserver_client,
                des_vol, share_instance['id']
            )
            LOG.debug("Snapshot list for backup %(snap_list)s.",
                      {'snap_list': snap_list_with_backup})
            if (self.is_volume_backup_before and
                    len(snap_list_with_backup) == 2):
                if snap_name == snap_list_with_backup[0]:
                    snap_to_delete = snap_list_with_backup[1]
                else:
                    snap_to_delete = snap_list_with_backup[0]
                self.is_volume_backup_before = False
                des_vserver_client.delete_snapshot(des_vol, snap_to_delete,
                                                   True)
                LOG.debug("Previous snapshot %(snap_name)s deleted"
                          " successfully.", {'snap_name': snap_to_delete})
        return progress_status

    @na_utils.trace
    def restore_backup(self, context, backup, share_instance,
                       share_server=None):
        """Restore the share backup"""

        src_vserver, src_vserver_client = self._get_vserver(
            share_server=share_server,
        )
        src_vol_name = self._get_backend_share_name(share_instance['id'])

        source_path = f"{src_vserver}:{src_vol_name}"
        des_vserver, des_vol = self._get_destination_vserver_and_vol(
            src_vserver_client,
            source_path,
        )
        if not des_vserver or not des_vol:
            raise exception.NetAppException("Unable to find the vserver"
                                            " and volume from the SnapMirror"
                                            " relationship.")
        snap_name = self._get_backup_snapshot_name(backup,
                                                   share_instance['id'])
        source_path = src_vserver + ":" + src_vol_name
        des_path = des_vserver + ":" + des_vol
        src_vserver_client.snapmirror_restore_vol(source_path=des_path,
                                                  dest_path=source_path,
                                                  source_snapshot=snap_name)

    @na_utils.trace
    def restore_backup_continue(self, context, backup,
                                share_instance, share_server=None):
        """Keep checking the restore operation status"""

        progress_status = {}
        src_vserver, src_vserver_client = self._get_vserver(
            share_server=share_server)
        src_vol_name = self._get_backend_share_name(share_instance['id'])

        source_path = f"{src_vserver}:{src_vol_name}"
        snapmirror_info = src_vserver_client.get_snapmirrors(
            dest_path=source_path,
        )
        if snapmirror_info:
            progress_status = {
                "total_progress": Backup.TOTAL_PROGRESS_ZERO.value
            }
            return progress_status
        LOG.debug("SnapMirror relationship of type RST is deleted")
        snap_name = self._get_backup_snapshot_name(backup,
                                                   share_instance['id'])
        snapshot_list = src_vserver_client.list_volume_snapshots(src_vol_name)
        for snapshot in snapshot_list:
            if snap_name in snapshot:
                progress_status["total_progress"] = (
                    Backup.TOTAL_PROGRESS_HUNDRED.value)
                return progress_status
        if not progress_status:
            err_msg = _("Failed to restore the snapshot %s.") % snap_name
            raise exception.NetAppException(err_msg)

    @na_utils.trace
    def delete_backup(self, context, backup, share_instance,
                      share_server=None):
        """Delete the share backup for a NetApp share"""

        try:
            src_vserver, src_vserver_client = self._get_vserver(
                share_server=share_server,
            )
        except exception.VserverNotFound:
            LOG.warning("Vserver associated with share %s was not found.",
                        share_instance['id'])
            return
        src_vol_name = self._get_backend_share_name(share_instance['id'])
        backend_name = self._get_backend(backup)
        if backend_name is None:
            return

        source_path = f"{src_vserver}:{src_vol_name}"
        des_vserver, des_vol = self._get_destination_vserver_and_vol(
            src_vserver_client,
            source_path,
            False,
        )

        if not des_vserver or not des_vol:
            LOG.debug("Unable to find the vserver and volume from the"
                      " SnapMirror relationship.")
            return
        des_path = f"{des_vserver}:{des_vol}"

        # Delete the snapshot from the destination volume
        snap_name = self._get_backup_snapshot_name(backup,
                                                   share_instance['id'])
        des_vserver_client = self._get_api_client_for_backend(
            backend_name,
            vserver=des_vserver,
        )
        try:
            list_snapshots = self._get_des_volume_backup_snapshots(
                des_vserver_client,
                des_vol,
                share_instance['id'],
            )
        except netapp_api.NaApiError:
            LOG.exception("Failed to get the snapshots from the cluster;"
                          " provide the right backup type or check that the"
                          " backend details are properly configured in the"
                          " manila.conf file.")
            return

        snapmirror_info = des_vserver_client.get_snapmirrors(
            source_path=source_path,
            dest_path=des_path,
        )
        is_snapshot_deleted = self._is_snapshot_deleted(True)
        if snapmirror_info and len(list_snapshots) == 1:
            self._resource_cleanup_for_backup(backup,
                                              share_instance,
                                              des_vserver,
                                              des_vol,
                                              share_server=share_server)
        elif len(list_snapshots) > 1:
            try:
                des_vserver_client.delete_snapshot(des_vol, snap_name, True)
            except netapp_api.NaApiError as e:
                with excutils.save_and_reraise_exception() as exc_context:
                    if "entry doesn't exist" in e.message:
                        exc_context.reraise = False
            try:
                des_vserver_client.get_snapshot(des_vol, snap_name)
                is_snapshot_deleted = self._is_snapshot_deleted(False)
            except (SnapshotResourceNotFound, netapp_api.NaApiError):
                LOG.debug("Snapshot %s deleted successfully.", snap_name)
        if not is_snapshot_deleted:
            err_msg = _("Snapshot '%(snapshot_name)s' was not deleted"
                        " successfully on ONTAP.") % {
                            "snapshot_name": snap_name}
            LOG.exception(err_msg)
            raise exception.NetAppException(err_msg)

    @na_utils.trace
    def _is_snapshot_deleted(self, is_deleted):
        return is_deleted

    @na_utils.trace
    def _get_backup_snapshot_name(self, backup, share_id):
        backup_id = backup.get('id', "")
        return f"{Backup.SM_LABEL.value}_{share_id}_{backup_id}"

    @na_utils.trace
    def _get_backend(self, backup):
        backup_type = backup.get(Backup.BACKUP_TYPE.value)
        try:
            backup_config = data_motion.get_backup_configuration(backup_type)
        except Exception:
            LOG.exception("There was an issue while getting the"
                          " backup configuration. Make sure the correct"
                          " backup type is provided when creating the"
                          " backup.")
            return None
        return backup_config.safe_get(Backup.BACKEND_NAME.value)

    @na_utils.trace
    def _get_des_volume_backup_snapshots(self, des_vserver_client,
                                         des_vol, share_id):
        """Get the list of snapshots from the destination volume"""

        des_snapshot_list = (des_vserver_client.
                             list_volume_snapshots(des_vol,
                                                   Backup.SM_LABEL.value))
        backup_filter = f"{Backup.SM_LABEL.value}_{share_id}"
        snap_list_with_backup = [snap for snap in des_snapshot_list
                                 if snap.startswith(backup_filter)]
        return snap_list_with_backup

    @na_utils.trace
    def _get_vserver_for_backup(self, backup, share_server=None):
        """Get the destination vserver

        If a vserver is not provided, create a new one
        (in the dhss=True case).
        """
        backup_type_config = data_motion.get_backup_configuration(
            backup.get(Backup.BACKUP_TYPE.value))
        if backup_type_config.get(Backup.DES_VSERVER.value):
            return backup_type_config.get(Backup.DES_VSERVER.value)
        else:
            return self._get_backup_vserver(backup, share_server=share_server)

    @na_utils.trace
    def _get_volume_for_backup(self, backup, share_instance,
                               src_vserver_client, des_vserver_client):
        """Get the destination volume

        If a volume is not provided under the backup_type stanza in the
        config file, create a new one.
        """

        dm_session = data_motion.DataMotionSession()
        backup_type = backup.get(Backup.BACKUP_TYPE.value)
        backup_type_config = data_motion.get_backup_configuration(backup_type)
        if (backup_type_config.get(Backup.DES_VSERVER.value) and
                backup_type_config.get(Backup.DES_VOLUME.value)):
            return backup_type_config.get(Backup.DES_VOLUME.value)
        else:
            des_aggr = dm_session.get_most_available_aggr_of_vserver(
                des_vserver_client)
            if not des_aggr:
                msg = _("Unable to find any aggregate on ONTAP"
                        " to create the volume")
                raise exception.NetAppException(msg)
            src_vol = self._get_backend_share_name(share_instance['id'])
            vol_attr = src_vserver_client.get_volume(src_vol)
            source_vol_size = vol_attr.get('size')
            vol_size_in_gb = int(source_vol_size) / units.Gi
            share_id = share_instance['id'].replace('-', '_')
            des_volume = f"backup_volume_{share_id}"
            des_vserver_client.create_volume(des_aggr, des_volume,
                                             vol_size_in_gb, volume_type='dp')
            return des_volume

    @na_utils.trace
    def _get_destination_vserver_and_vol(self, src_vserver_client,
                                         source_path, validate_relation=True):
        """Get the destination vserver and volume from the SM relationship"""

        des_vserver, des_vol = None, None
        snapmirror_info = src_vserver_client.get_snapmirror_destinations(
            source_path=source_path)
        if validate_relation and len(snapmirror_info) != 1:
            msg = _("There is more than one relationship with the source"
                    " '%(source_path)s'.") % {'source_path': source_path}
            raise exception.NetAppException(msg)
        if len(snapmirror_info) == 1:
            des_vserver = snapmirror_info[0].get("destination-vserver")
            des_vol = snapmirror_info[0].get("destination-volume")
        return des_vserver, des_vol

    @na_utils.trace
    def _verify_and_wait_for_snapshot_to_transfer(self,
                                                  des_vserver_client,
                                                  des_vol,
                                                  snap_name,
                                                  timeout=300,
                                                  ):
        """Wait for and verify that the snapshot reached the destination"""

        interval = 5
        retries = (timeout / interval or 1)

        @manila_utils.retry(retry_param=(netapp_api.NaApiError,
                                         SnapshotResourceNotFound),
                            interval=interval,
                            retries=retries, backoff_rate=1)
        def _wait_for_snapshot_to_transfer():
            des_vserver_client.get_snapshot(des_vol, snap_name)
        try:
            _wait_for_snapshot_to_transfer()
        except (netapp_api.NaApiError, SnapshotResourceNotFound):
            msg = _("Timed out while waiting for the snapshot to transfer")
            raise exception.NetAppException(message=msg)

    @na_utils.trace
    def _get_backup_progress_status(self, des_vserver_client,
                                    snapmirror_details):
        """Calculate the percentage of SnapMirror data transferred"""

        des_vol = snapmirror_details[0].get("destination-volume")
        vol_attr = des_vserver_client.get_volume(des_vol)
        size_used = vol_attr.get('size-used')
        sm_data_transferred = snapmirror_details[0].get(
            "last-transfer-size")
        if size_used and sm_data_transferred:
            progress_status_percent = (int(sm_data_transferred) / int(
                size_used)) * 100
            return str(round(progress_status_percent, 2))
        else:
            return Backup.TOTAL_PROGRESS_ZERO.value
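        # Worked example with made-up numbers: if the destination volume
        # reports size-used of 4 GiB and the last SnapMirror transfer moved
        # 1 GiB, then (1 * 1024 ** 3) / (4 * 1024 ** 3) * 100 = 25.0,
        # reported as the string "25.0".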

    @na_utils.trace
    def _resource_cleanup_for_backup(self, backup, share_instance,
                                     des_vserver, des_vol,
                                     share_server=None):
        """Clean up the created resources

        Clean up all created ONTAP resources when the last backup is deleted
        or when an exception is thrown while creating the backup.
        """
        src_vserver, src_vserver_client = self._get_vserver(
            share_server=share_server)
        dm_session = data_motion.DataMotionSession()
        backup_type_config = data_motion.get_backup_configuration(
            backup.get(Backup.BACKUP_TYPE.value))
        backend_name = backup_type_config.safe_get(Backup.BACKEND_NAME.value)
        des_vserver_client = self._get_api_client_for_backend(
            backend_name,
            vserver=des_vserver,
        )
        src_vol_name = self._get_backend_share_name(share_instance['id'])

        # Abort the relationship
        try:
            des_vserver_client.abort_snapmirror_vol(src_vserver,
                                                    src_vol_name,
                                                    des_vserver,
                                                    des_vol,
                                                    clear_checkpoint=False)
        except netapp_api.NaApiError:
            pass
        try:
            des_vserver_client.delete_snapmirror_vol(src_vserver,
                                                     src_vol_name,
                                                     des_vserver,
                                                     des_vol)
        except netapp_api.NaApiError as e:
            with excutils.save_and_reraise_exception() as exc_context:
                if (e.code == netapp_api.EOBJECTNOTFOUND or
                        e.code == netapp_api.ESOURCE_IS_DIFFERENT or
                        "(entry doesn't exist)" in e.message):
                    exc_context.reraise = False

        dm_session.wait_for_snapmirror_release_vol(
            src_vserver, des_vserver, src_vol_name,
            des_vol, False, src_vserver_client,
            timeout=backup_type_config.netapp_snapmirror_job_timeout)

        try:
            policy_name = f"{Backup.SM_POLICY.value}_{share_instance['id']}"
            des_vserver_client.delete_snapmirror_policy(policy_name)
        except netapp_api.NaApiError:
            pass

        # Delete the vserver peering
        try:
            src_vserver_client.delete_vserver_peer(src_vserver, des_vserver)
        except netapp_api.NaApiError:
            pass

        # Delete the volume
        if not backup_type_config.safe_get(Backup.DES_VOLUME.value):
            try:
                des_vserver_client.offline_volume(des_vol)
                des_vserver_client.delete_volume(des_vol)
            except netapp_api.NaApiError:
                pass

        # Delete the vserver
        if share_server is not None:
            self._delete_backup_vserver(backup, des_vserver)

@@ -32,6 +32,7 @@ from manila.i18n import _
from manila.message import message_field
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.client import client_cmode_rest
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
@@ -39,6 +40,7 @@ from manila.share import share_types
from manila.share import utils as share_utils
from manila import utils


LOG = log.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
@@ -2374,3 +2376,52 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
            current_network_allocations, new_network_allocations,
            updated_export_locations)
        return updates

    def _get_backup_vserver(self, backup, share_server=None):
        backend_name = self._get_backend(backup)
        backend_config = data_motion.get_backend_configuration(backend_name)
        des_cluster_api_client = self._get_api_client_for_backend(
            backend_name)

        aggr_list = des_cluster_api_client.list_non_root_aggregates()
        aggr_pattern = (backend_config.
                        netapp_aggregate_name_search_pattern)
        if aggr_pattern:
            aggr_matching_list = [
                element for element in aggr_list if re.search(aggr_pattern,
                                                              element)
            ]
            aggr_list = aggr_matching_list
        share_server_id = share_server['id']
        des_vserver = f"backup_{share_server_id}"
        LOG.debug("Creating vserver %s:", des_vserver)
        try:
            des_cluster_api_client.create_vserver(
                des_vserver,
                None,
                None,
                aggr_list,
                'Default',
                client_cmode_rest.DEFAULT_SECURITY_CERT_EXPIRE_DAYS,
            )
        except netapp_api.NaApiError as e:
            with excutils.save_and_reraise_exception() as exc_context:
                if 'already used' in e.message:
                    exc_context.reraise = False
        return des_vserver

    def _delete_backup_vserver(self, backup, des_vserver):
        """Delete the vserver"""

        backend_name = self._get_backend(backup)
        des_vserver_client = self._get_api_client_for_backend(
            backend_name, vserver=des_vserver)
        try:
            des_cluster_api_client = self._get_api_client_for_backend(
                backend_name)
            des_cluster_api_client.delete_vserver(des_vserver,
                                                  des_vserver_client)
        except exception.NetAppException as e:
            with excutils.save_and_reraise_exception() as exc_context:
                if 'has shares' in e.msg:
                    exc_context.reraise = False
@@ -26,6 +26,7 @@ from oslo_log import log

from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils

@@ -177,3 +178,13 @@ class NetAppCmodeSingleSVMFileStorageLibrary(
        if ipv6:
            versions.append(6)
        return versions

    def _get_backup_vserver(self, backup, share_server=None):

        backend_name = self._get_backend(backup)
        backend_config = data_motion.get_backend_configuration(backend_name)
        if share_server is not None:
            msg = _('Share server must not be passed to the driver '
                    'when the driver is not managing share servers.')
            raise exception.InvalidParameterValue(err=msg)
        return backend_config.netapp_vserver
@@ -300,6 +300,42 @@ netapp_data_motion_opts = [
               'a replica.'),
]

netapp_backup_opts = [
    cfg.ListOpt('netapp_enabled_backup_types',
                default=[],
                help='Specify the backup_types that are compatible with this '
                     'backend for provisioning backup shares over a '
                     'SnapVault relationship. Multiple backup_types can be '
                     'provided. If multiple backup types are enabled, create '
                     'a separate config section for each backup type '
                     'specifying the "netapp_backup_vserver", '
                     '"netapp_backup_backend_section_name", '
                     '"netapp_backup_share", and '
                     '"netapp_snapmirror_job_timeout" as appropriate. '
                     'Example: netapp_enabled_backup_types = eng_backup, '
                     'finance_backup'),
    cfg.StrOpt('netapp_backup_backend_section_name',
               help='Backend (ONTAP cluster) name where the backup volume '
                    'will be provisioned. This is one of the backends '
                    'enabled in the manila.conf file.'),
    cfg.StrOpt('netapp_backup_vserver',
               default='',
               help='Vserver name of the backend that is used to back up '
                    'the share. When the user provides a vserver value, the '
                    'backup volume will be created under this vserver.'),
    cfg.StrOpt('netapp_backup_share',
               default='',
               help='Specify a backup share (volume) name when a dedicated '
                    'volume should be used for backing up the share. '
                    'netapp_backup_vserver must be specified if '
                    'netapp_backup_share is provided.'),
    cfg.IntOpt('netapp_snapmirror_job_timeout',
               min=0,
               default=1800,  # 30 minutes
               help='The maximum time in seconds to wait for a '
                    'snapmirror-related backup operation to complete.'),
]

CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
CONF.register_opts(netapp_connection_opts)
@@ -308,3 +344,4 @@ CONF.register_opts(netapp_basicauth_opts)
CONF.register_opts(netapp_provisioning_opts)
CONF.register_opts(netapp_support_opts)
CONF.register_opts(netapp_data_motion_opts)
CONF.register_opts(netapp_backup_opts)
@@ -65,6 +65,7 @@ SHARE_AGGREGATE_DISK_TYPES = ['SATA', 'SSD']
EFFECTIVE_TYPE = 'fake_effective_type1'
SHARE_NAME = 'fake_share'
SHARE_SIZE = '1000000000'
SHARE_USED_SIZE = '3456796'
SHARE_NAME_2 = 'fake_share_2'
FLEXGROUP_STYLE_EXTENDED = 'flexgroup'
FLEXVOL_STYLE_EXTENDED = 'flexvol'
@@ -2351,6 +2352,7 @@ VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML("""
        </volume-id-attributes>
        <volume-space-attributes>
          <size>%(size)s</size>
          <size-used>%(size-used)s</size-used>
        </volume-space-attributes>
        <volume-qos-attributes>
          <policy-group-name>%(qos-policy-group-name)s</policy-group-name>
@@ -2364,6 +2366,7 @@ VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML("""
    'vserver': VSERVER_NAME,
    'volume': SHARE_NAME,
    'size': SHARE_SIZE,
    'size-used': SHARE_USED_SIZE,
    'qos-policy-group-name': QOS_POLICY_GROUP_NAME,
    'style-extended': FLEXVOL_STYLE_EXTENDED,
})
@@ -2385,6 +2388,7 @@ VOLUME_GET_ITER_FLEXGROUP_VOLUME_TO_MANAGE_RESPONSE = etree.XML("""
        </volume-id-attributes>
        <volume-space-attributes>
          <size>%(size)s</size>
          <size-used>%(size-used)s</size-used>
        </volume-space-attributes>
        <volume-qos-attributes>
          <policy-group-name>%(qos-policy-group-name)s</policy-group-name>
@@ -2398,6 +2402,7 @@ VOLUME_GET_ITER_FLEXGROUP_VOLUME_TO_MANAGE_RESPONSE = etree.XML("""
    'vserver': VSERVER_NAME,
    'volume': SHARE_NAME,
    'size': SHARE_SIZE,
    'size-used': SHARE_USED_SIZE,
    'qos-policy-group-name': QOS_POLICY_GROUP_NAME,
    'style-extended': FLEXGROUP_STYLE_EXTENDED,
})
@@ -2417,6 +2422,7 @@ VOLUME_GET_ITER_NO_QOS_RESPONSE = etree.XML("""
        </volume-id-attributes>
        <volume-space-attributes>
          <size>%(size)s</size>
          <size-used>%(size-used)s</size-used>
        </volume-space-attributes>
      </volume-attributes>
    </attributes-list>
@@ -2427,6 +2433,7 @@ VOLUME_GET_ITER_NO_QOS_RESPONSE = etree.XML("""
    'vserver': VSERVER_NAME,
    'volume': SHARE_NAME,
    'size': SHARE_SIZE,
    'size-used': SHARE_USED_SIZE,
    'style-extended': FLEXVOL_STYLE_EXTENDED,
})

@@ -3754,7 +3761,8 @@ GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES = {
                "path": VOLUME_JUNCTION_PATH
            },
            "space": {
                "size": 21474836480
                "size": 21474836480,
                'used': SHARE_USED_SIZE,
            },
        }
    ],
@@ -4796,7 +4804,8 @@ FAKE_VOLUME_MANAGE = {
                }
            },
            'space': {
                'size': SHARE_SIZE
                'size': SHARE_SIZE,
                'used': SHARE_USED_SIZE,
            }
        }
    ],
@ -4299,6 +4299,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
},
|
||||
'volume-space-attributes': {
|
||||
'size': None,
|
||||
'size-used': None,
|
||||
},
|
||||
'volume-qos-attributes': {
|
||||
'policy-group-name': None,
|
||||
@ -4315,6 +4316,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
'type': 'rw',
|
||||
'style': 'flex',
|
||||
'size': fake.SHARE_SIZE,
|
||||
'size-used': fake.SHARE_USED_SIZE,
|
||||
'owning-vserver-name': fake.VSERVER_NAME,
|
||||
'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME,
|
||||
'style-extended': (fake.FLEXGROUP_STYLE_EXTENDED
|
||||
@ -4358,6 +4360,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
},
|
||||
'volume-space-attributes': {
|
||||
'size': None,
|
||||
'size-used': None,
|
||||
},
|
||||
'volume-qos-attributes': {
|
||||
'policy-group-name': None,
|
||||
@ -4374,6 +4377,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
'type': 'rw',
|
||||
'style': 'flex',
|
||||
'size': fake.SHARE_SIZE,
|
||||
'size-used': fake.SHARE_USED_SIZE,
|
||||
'owning-vserver-name': fake.VSERVER_NAME,
|
||||
'qos-policy-group-name': None,
|
||||
'style-extended': fake.FLEXVOL_STYLE_EXTENDED,
|
||||
@ -7927,8 +7931,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
|
||||
self.client.create_snapmirror_policy(
|
||||
fake.SNAPMIRROR_POLICY_NAME, discard_network_info=discard_network,
|
||||
preserve_snapshots=preserve_snapshots)
|
||||
|
||||
snapmirror_label="backup", preserve_snapshots=preserve_snapshots)
|
||||
expected_create_api_args = {
|
||||
'policy-name': fake.SNAPMIRROR_POLICY_NAME,
|
||||
'type': 'async_mirror',
|
||||
@ -7944,8 +7947,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
if preserve_snapshots:
|
||||
expected_add_rules = {
|
||||
'policy-name': fake.SNAPMIRROR_POLICY_NAME,
|
||||
'snapmirror-label': 'all_source_snapshots',
|
||||
'keep': '1',
|
||||
'snapmirror-label': 'backup',
|
||||
'keep': 1,
|
||||
'preserve': 'false'
|
||||
}
|
||||
expected_calls.append(mock.call('snapmirror-policy-add-rule',
|
||||
@ -9160,3 +9163,52 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
self.client.configure_active_directory,
|
||||
fake.CIFS_SECURITY_SERVICE,
|
||||
fake.VSERVER_NAME)
|
||||
|
||||
def test_snapmirror_restore_vol(self):
|
||||
self.mock_object(self.client, 'send_request')
|
||||
self.client.snapmirror_restore_vol(source_path=fake.SM_SOURCE_PATH,
|
||||
dest_path=fake.SM_DEST_PATH,
|
||||
source_snapshot=fake.SNAPSHOT_NAME,
|
||||
)
|
||||
snapmirror_restore_args = {
|
||||
'source-location': fake.SM_SOURCE_PATH,
|
||||
'destination-location': fake.SM_DEST_PATH,
|
||||
'source-snapshot': fake.SNAPSHOT_NAME,
|
||||
|
||||
}
|
||||
self.client.send_request.assert_has_calls([
|
||||
mock.call('snapmirror-restore', snapmirror_restore_args)])
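The ZAPI client method exercised by this test can be sketched roughly as follows; the argument names and the 'snapmirror-restore' API arguments are taken from the test's expectations, while the decorator and docstring are assumptions modeled on the surrounding client code.

@na_utils.trace
def snapmirror_restore_vol(self, source_path=None, dest_path=None,
                           source_snapshot=None):
    """Restores a SnapMirror destination volume from a given snapshot."""
    api_args = {
        'source-location': source_path,
        'destination-location': dest_path,
        # The backup restore workflow restores from the snapshot that was
        # transferred to the destination (SnapVault) volume.
        'source-snapshot': source_snapshot,
    }
    self.send_request('snapmirror-restore', api_args)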

@ddt.data({'snapmirror_label': None, 'newer_than': '2345'},
{'snapmirror_label': "fake_backup", 'newer_than': None})
@ddt.unpack
def test_list_volume_snapshots(self, snapmirror_label, newer_than):
print(f"snapmirror_label: {snapmirror_label}")
api_response = netapp_api.NaElement(
fake.SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))

result = self.client.list_volume_snapshots(
fake.SHARE_NAME,
snapmirror_label=snapmirror_label,
newer_than=newer_than)
snapshot_get_iter_args = {
'query': {
'snapshot-info': {
'volume': fake.SHARE_NAME,
},
},
}
if newer_than:
snapshot_get_iter_args['query']['snapshot-info'][
'access-time'] = '>' + newer_than
if snapmirror_label:
snapshot_get_iter_args['query']['snapshot-info'][
'snapmirror-label'] = snapmirror_label
self.client.send_iter_request.assert_has_calls([
mock.call('snapshot-get-iter', snapshot_get_iter_args)])

expected = [fake.SNAPSHOT_NAME]
self.assertEqual(expected, result)
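The ZAPI-side helper verified here can be sketched as below; the query keys mirror the expected snapshot-get-iter arguments, while the result parsing (returning snapshot names) follows the existing snapshot-listing code in the client and is an assumption here.

@na_utils.trace
def list_volume_snapshots(self, volume_name, snapmirror_label=None,
                          newer_than=None):
    """Gets the names of a volume's snapshots, optionally filtered."""
    api_args = {
        'query': {
            'snapshot-info': {
                'volume': volume_name,
            },
        },
    }
    if newer_than:
        # Only snapshots created after the given timestamp.
        api_args['query']['snapshot-info']['access-time'] = (
            '>' + newer_than)
    if snapmirror_label:
        # Only snapshots carrying the backup SnapMirror label.
        api_args['query']['snapshot-info']['snapmirror-label'] = (
            snapmirror_label)
    result = self.send_iter_request('snapshot-get-iter', api_args)
    attributes_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')
    return [snapshot_info.get_child_content('name')
            for snapshot_info in attributes_list.get_children()]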

@ -650,7 +650,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
self.assertEqual(expected, result)

@ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
{'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
{'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']}, )
@ddt.unpack
def test_get_aggregate_disk_types(self, types, expected):

@ -945,9 +945,10 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
'type': fake_volume.get('type', ''),
'style': fake_volume.get('style', ''),
'size': fake_volume.get('space', {}).get('size', ''),
'size-used': fake_volume.get('space', {}).get('used', ''),
'qos-policy-group-name': fake_volume.get('qos', {})
.get('policy', {})
.get('name'),
.get('policy', {})
.get('name'),
'style-extended': fake_volume.get('style', '')
}

@ -1500,7 +1501,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
api_response = fake.EXPORT_POLICY_REST

mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(
return_value=api_response))
return_value=api_response))

if not api_response.get('records'):
return
@ -1577,7 +1578,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
mock_send_request.assert_has_calls([
mock.call('/storage/qos/policies', 'get', query=query),
mock.call(f'/storage/qos/policies/{uuid}', 'patch',
body=body),
body=body),
])

def test_qos_policy_group_get(self):
@ -1592,7 +1593,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
'vserver': qos_policy.get('svm', {}).get('name'),
'max-throughput': max_throughput if max_throughput else None,
'num-workloads': int(qos_policy.get('object_count')),
}
}

query = {
'name': qos_policy_group_name,
@ -1967,7 +1968,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
mock_get_unique_volume = self.mock_object(
self.client, "_get_volume_by_args",
mock.Mock(return_value=fake_resp_vol)
)
)
mock_send_request = self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.VOLUME_LIST_SIMPLE_RESPONSE_REST))
@ -1987,7 +1988,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
mock_get_unique_volume = self.mock_object(
self.client, "_get_volume_by_args",
mock.Mock(return_value=fake_resp_vol)
)
)
mock_send_request = self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.VOLUME_LIST_SIMPLE_RESPONSE_REST))
@ -2922,9 +2923,9 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
fake.SNAPMIRROR_GET_ITER_RESPONSE_REST,
{
"job":
{
"uuid": fake.FAKE_UUID
},
{
"uuid": fake.FAKE_UUID
},
"num_records": 1
}
]
@ -3164,7 +3165,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
enable_tunneling=False),
mock.call(f'/protocols/fpolicy/{svm_id}/policies'
f'/{fake.FPOLICY_POLICY_NAME}', 'patch')
])
])

@ddt.data([fake.NO_RECORDS_RESPONSE_REST, None],
[fake.SVMS_LIST_SIMPLE_RESPONSE_REST,
@ -3498,7 +3499,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
"vserver": "fake_svm",
"volume": "fake_vol",
"destination_vserver": "fake_svm_2"
}
}
self.client.send_request.assert_called_once_with(
"/private/cli/volume/rehost", 'post', body=body)

@ -4310,7 +4311,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
fake_response = copy.deepcopy(fake.PREFERRED_DC_REST)
fake_ss = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE)
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=fake_response))
mock.Mock(return_value=fake_response))
self.client.remove_preferred_dcs(fake_ss, svm_uuid)
query = {
'fqdn': fake.LDAP_AD_SECURITY_SERVICE.get('domain'),
@ -4327,7 +4328,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
fake_response = copy.deepcopy(fake.PREFERRED_DC_REST)
fake_ss = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE)
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=fake_response))
mock.Mock(return_value=fake_response))
self.mock_object(self.client, 'send_request',
mock.Mock(side_effect=netapp_api.api.NaApiError))
self.assertRaises(netapp_api.api.NaApiError,
@ -4386,7 +4387,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
"peer": {
"svm": {
"name": fake.VSERVER_PEER_NAME,
}
}
}
}],
}
@ -4631,7 +4632,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):

mock_ports = (
self.mock_object(self.client, 'get_node_data_ports', mock.Mock(
return_value=fake.REST_SPEED_SORTED_PORTS)))
return_value=fake.REST_SPEED_SORTED_PORTS)))

test_result = self.client.list_node_data_ports(fake.NODE_NAME)

@ -5052,7 +5053,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
mock.Mock(return_value=api_response))

result = self.client.get_nfs_config(['tcp-max-xfer-size',
'udp-max-xfer-size'],
'udp-max-xfer-size'],
fake.VSERVER_NAME)
expected = {
'tcp-max-xfer-size': '65536',
@ -6475,9 +6476,10 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
'type': fake_volume.get('type', ''),
'style': fake_volume.get('style', ''),
'size': fake_volume.get('space', {}).get('size', ''),
'size-used': fake_volume.get('space', {}).get('used', ''),
'qos-policy-group-name': fake_volume.get('qos', {})
.get('policy', {})
.get('name', ''),
.get('policy', {})
.get('name', ''),
'style-extended': fake_volume.get('style', '')
}
result = self.client.get_volume(fake.VOLUME_NAMES[0])
@ -6633,7 +6635,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
fake_response = [fake.PREFERRED_DC_REST,
netapp_api.api.NaApiError]
self.mock_object(self.client, 'send_request',
mock.Mock(side_effect=fake_response))
mock.Mock(side_effect=fake_response))
self.assertRaises(exception.NetAppException,
self.client.remove_preferred_dcs,
fake.LDAP_AD_SECURITY_SERVICE,
@ -6809,7 +6811,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error(
code=return_code)))
code=return_code)))
self.client.set_nfs_export_policy_for_volume(
fake.VOLUME_NAMES[0], fake.EXPORT_POLICY_NAME)

@ -6877,3 +6879,77 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
self.client.configure_active_directory,
fake_security,
fake.VSERVER_NAME)

def test_snapmirror_restore_vol(self):
uuid = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST["uuid"]
body = {
"destination": {"path": fake.SM_DEST_PATH},
"source_snapshot": fake.SNAPSHOT_NAME
}
snapmirror_info = [{'destination-vserver': "fake_des_vserver",
'destination-volume': "fake_des_vol",
'relationship-status': "idle",
'uuid': uuid}]

self.mock_object(self.client, 'get_snapmirror_destinations',
mock.Mock(return_value=snapmirror_info))
self.mock_object(self.client, 'send_request')
self.client.snapmirror_restore_vol(source_path=fake.SM_SOURCE_PATH,
dest_path=fake.SM_DEST_PATH,
source_snapshot=fake.SNAPSHOT_NAME)
self.client.send_request.assert_called_once_with(
f'/snapmirror/relationships/{uuid}/restore', 'post', body=body)

@ddt.data({'snapmirror_label': None, 'newer_than': '2345'},
{'snapmirror_label': "fake_backup", 'newer_than': None})
@ddt.unpack
def test_list_volume_snapshots(self, snapmirror_label, newer_than):
fake_response = fake.SNAPSHOTS_REST_RESPONSE
api_response = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
self.mock_object(self.client,
'_get_volume_by_args',
mock.Mock(return_value=api_response))
mock_request = self.mock_object(self.client, 'send_request',
mock.Mock(return_value=fake_response))
self.client.list_volume_snapshots(fake.SHARE_NAME,
snapmirror_label=snapmirror_label,
newer_than=newer_than)
uuid = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST["uuid"]
query = {}
if snapmirror_label:
query = {
'snapmirror_label': snapmirror_label,
}
if newer_than:
query['create_time'] = '>' + newer_than

mock_request.assert_called_once_with(
f'/storage/volumes/{uuid}/snapshots/',
'get', query=query)
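A rough sketch of the REST variant implied by this test is shown below; the endpoint and query keys come from the asserted call, while the keyword passed to _get_volume_by_args and the record parsing are assumptions.

def list_volume_snapshots(self, volume_name, snapmirror_label=None,
                          newer_than=None):
    """Gets the names of a volume's snapshots via the REST API."""
    volume = self._get_volume_by_args(vol_name=volume_name)
    query = {}
    if snapmirror_label:
        # Only snapshots tagged with the backup SnapMirror label.
        query['snapmirror_label'] = snapmirror_label
    if newer_than:
        query['create_time'] = '>' + newer_than
    response = self.send_request(
        f"/storage/volumes/{volume['uuid']}/snapshots/", 'get', query=query)
    return [snapshot['name'] for snapshot in response.get('records', [])]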

@ddt.data(('vault', False, True), (None, False, False))
@ddt.unpack
def test_create_snapmirror_policy_rest(self, policy_type,
discard_network_info,
preserve_snapshots):
fake_response = fake.SNAPSHOTS_REST_RESPONSE
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=fake_response))
policy_name = fake.SNAPMIRROR_POLICY_NAME
self.client.create_snapmirror_policy(
policy_name, policy_type=policy_type,
discard_network_info=discard_network_info,
preserve_snapshots=preserve_snapshots,
snapmirror_label='backup',
keep=30)
if policy_type == "vault":
body = {"name": policy_name, "type": "async",
"create_snapshot_on_source": False}
else:
body = {"name": policy_name, "type": policy_type}
if discard_network_info:
body["exclude_network_config"] = {'svmdr-config-obj': 'network'}
if preserve_snapshots:
body["retention"] = [{"label": 'backup', "count": 30}]
self.client.send_request.assert_called_once_with(
'/snapmirror/policies/', 'post', body=body)
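The asserted request body suggests a REST-side implementation roughly like the sketch below; the signature mirrors the ZAPI method and the defaults shown are assumptions. The 'vault' branch is the one the share backup workflow relies on: a SnapVault-style policy that retains labeled snapshots on the destination volume rather than mirroring every source snapshot.

def create_snapmirror_policy(self, policy_name, policy_type='async_mirror',
                             discard_network_info=True,
                             preserve_snapshots=True,
                             snapmirror_label='all_source_snapshots',
                             keep=1):
    """Creates a SnapMirror policy for a vserver using the REST API."""
    if policy_type == 'vault':
        # SnapVault-style backups: snapshots are created on the source with
        # the backup label, not by the relationship itself.
        body = {'name': policy_name, 'type': 'async',
                'create_snapshot_on_source': False}
    else:
        body = {'name': policy_name, 'type': policy_type}
    if discard_network_info:
        body['exclude_network_config'] = {'svmdr-config-obj': 'network'}
    if preserve_snapshots:
        # Retain 'keep' snapshots carrying the given label on the
        # destination (backup) volume.
        body['retention'] = [{'label': snapmirror_label, 'count': keep}]
    return self.send_request('/snapmirror/policies/', 'post', body=body)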

@ -1272,3 +1272,43 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):

mock_src_client.release_snapmirror_vol.assert_called()
self.assertIsNone(result)

def test_get_most_available_aggr_of_vserver(self):
vserver_client = mock.Mock()
aggr_space_attr = {fake.AGGREGATE: {'available': 5678},
'aggr2': {'available': 2024}}
self.mock_object(vserver_client,
'get_vserver_aggregate_capacities',
mock.Mock(return_value=aggr_space_attr))
result = self.dm_session.get_most_available_aggr_of_vserver(
vserver_client)
self.assertEqual(result, fake.AGGREGATE)
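The data-motion helper checked here amounts to picking the aggregate with the most available space reported for the vserver; a minimal sketch follows, with the empty-result handling assumed.

def get_most_available_aggr_of_vserver(self, vserver_client):
    """Returns the aggregate with the most available space on the vserver."""
    aggr_space_attrs = vserver_client.get_vserver_aggregate_capacities()
    if not aggr_space_attrs:
        return None
    # e.g. {'aggr1': {'available': 5678}, 'aggr2': {'available': 2024}}
    # -> 'aggr1'
    return max(aggr_space_attrs,
               key=lambda aggr: aggr_space_attrs[aggr]['available'])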

def test_initialize_and_wait_snapmirror_vol(self):
vserver_client = mock.Mock()
snapmirror_info = [{'source-vserver': fake.VSERVER1,
'source-volume': "fake_source_vol",
'destination-vserver': fake.VSERVER2,
'destination-volume': "fake_des_vol",
'relationship-status': "idle"}]
self.mock_object(vserver_client,
'get_snapmirrors',
mock.Mock(return_value=snapmirror_info))

(self.dm_session.
initialize_and_wait_snapmirror_vol(vserver_client,
fake.VSERVER1,
fake.FLEXVOL_NAME,
fake.VSERVER2,
fake.FLEXVOL_NAME_1,
source_snapshot=None,
transfer_priority=None,
timeout=300))
(vserver_client.initialize_snapmirror_vol.
assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY,
source_snapshot=mock.ANY,
transfer_priority=mock.ANY,
))

File diff suppressed because it is too large
@ -35,6 +35,8 @@ from manila.share import share_types
from manila.share import utils as share_utils
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as c_fake
from manila.tests.share.drivers.netapp.dataontap.cluster_mode.test_lib_base\
import _get_config
from manila.tests.share.drivers.netapp.dataontap import fakes as fake

@ -4078,3 +4080,42 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._build_model_update.assert_called_once_with(
fake_current_network_allocations, fake_new_network_allocations,
export_locations=None)

def test__get_backup_vserver(self):
mock_dest_client = mock.Mock()
self.mock_object(self.library,
'_get_backend',
mock.Mock(return_value=fake.BACKEND_NAME))
self.mock_object(data_motion,
'get_backend_configuration',
mock.Mock(return_value=_get_config()))
self.mock_object(self.library,
'_get_api_client_for_backend',
mock.Mock(return_value=mock_dest_client))
self.mock_object(mock_dest_client,
'list_non_root_aggregates',
mock.Mock(return_value=['aggr1', 'aggr2']))
self.mock_object(mock_dest_client,
'create_vserver',
mock.Mock(side_effect=netapp_api.NaApiError(
message='Vserver name is already used by another'
' Vserver')))
self.library._get_backup_vserver(fake.SHARE_BACKUP, fake.SHARE_SERVER)

def test__delete_backup_vserver(self):
mock_api_client = mock.Mock()
self.mock_object(self.library,
'_get_backend',
mock.Mock(return_value=fake.BACKEND_NAME))
self.mock_object(self.library,
'_get_api_client_for_backend',
mock.Mock(return_value=mock_api_client))
des_vserver = fake.VSERVER2
msg = (f"Cannot delete Vserver. Vserver {des_vserver} "
f"has shares.")
self.mock_object(mock_api_client,
'delete_vserver',
mock.Mock(
side_effect=exception.NetAppException(
message=msg)))
self.library._delete_backup_vserver(fake.SHARE_BACKUP, des_vserver)

@ -21,10 +21,13 @@ import ddt
from oslo_log import log

from manila import exception
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm
from manila.share.drivers.netapp import utils as na_utils
from manila import test
from manila.tests.share.drivers.netapp.dataontap.cluster_mode.test_lib_base\
import _get_config
import manila.tests.share.drivers.netapp.dataontap.fakes as fake

@ -295,3 +298,26 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
result = self.library.get_admin_network_allocations_number()

self.assertEqual(0, result)

def test__get_backup_vserver(self):
self.mock_object(self.library,
'_get_backend',
mock.Mock(return_value=fake.BACKEND_NAME))
self.mock_object(data_motion,
'get_backend_configuration',
mock.Mock(return_value=_get_config()))
self.library._get_backup_vserver(fake.SHARE_BACKUP)

def test__get_backup_vserver_with_share_server_negative(self):
self.mock_object(self.library,
'_get_backend',
mock.Mock(return_value=fake.BACKEND_NAME))
self.mock_object(data_motion,
'get_backend_configuration',
mock.Mock(return_value=_get_config()))
self.assertRaises(
exception.InvalidParameterValue,
self.library._get_backup_vserver,
fake.SHARE_BACKUP,
fake.SHARE_SERVER,
)

@ -40,6 +40,7 @@ SHARE_NAME = 'share_7cf7c200_d3af_4e05_b87e_9167c95dfcad'
SHARE_NAME2 = 'share_d24e7257_124e_4fb6_b05b_d384f660bc85'
SHARE_INSTANCE_NAME = 'share_d24e7257_124e_4fb6_b05b_d384f660bc85'
FLEXVOL_NAME = 'fake_volume'
FLEXVOL_NAME_1 = 'fake_volume_1'
JUNCTION_PATH = '/%s' % FLEXVOL_NAME
EXPORT_LOCATION = '%s:%s' % (HOST_NAME, JUNCTION_PATH)
SNAPSHOT_NAME = 'fake_snapshot'
@ -112,6 +113,7 @@ FPOLICY_EXT_TO_INCLUDE = 'avi'
FPOLICY_EXT_TO_INCLUDE_LIST = ['avi']
FPOLICY_EXT_TO_EXCLUDE = 'jpg,mp3'
FPOLICY_EXT_TO_EXCLUDE_LIST = ['jpg', 'mp3']
BACKUP_TYPE = "fake_backup_type"

JOB_ID = '123'
JOB_STATE = 'success'
@ -1869,6 +1871,24 @@ NEW_NETWORK_ALLOCATIONS = {
'network_allocations': USER_NETWORK_ALLOCATIONS
}

SHARE_BACKUP = {
'id': '242ff47e-518d-4b07-b3c3-0a51e6744149',
'share_id': 'd0a424c3-fee9-4781-9d4a-2c48a63386aa',
'size': SHARE_SIZE,
'host': MANILA_HOST_NAME,
'display_name': 'fake_backup',
'backup_options': {'backend': BACKEND_NAME, 'backup_type': BACKUP_TYPE},
}

SNAP_MIRROR_INFO = {'source-vserver': VSERVER1,
'source-volume': FLEXVOL_NAME,
'destination-vserver': VSERVER2,
'destination-volume': FLEXVOL_NAME_1,
'relationship-status': "idle",
'last-transfer-type': "update",
}

SERVER_MODEL_UPDATE = {
'server_details': {
'ports': '{"%s": "%s", "%s": "%s"}' % (

@ -25,6 +25,7 @@ def create_configuration():
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_backup_opts)
return config

@ -0,0 +1,8 @@
---
features:
- |
The NetApp ONTAP driver now supports driver-assisted share backup. NetApp
SnapVault technology is used to create and restore backups for NetApp ONTAP
shares. The backup delete workflow simply deletes the transferred snapshots
from the destination backup volume. For details on the configuration needed
for backup, refer to https://etherpad.opendev.org/p/manila-share-backup.