NetApp: Fix issue with busy snapshot deletion.

This fixes the deletion of temporary snapshots created during the
consistency group creation process. When the system is under load, a
temporary snapshot may remain in a "busy" state after consistency
group creation is otherwise complete, so the snapshot cannot be
deleted right away and would previously be leaked.
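
The cleanup now follows the pattern below (a minimal sketch; the
wrapper function name and its parameters are illustrative, while the
client calls match the code added by this change):

    from cinder import exception

    def _delete_cg_snapshots(zapi_client, flexvols, cgsnapshot):
        # Sketch only: zapi_client, flexvols, and cgsnapshot stand in
        # for the surrounding driver state in block_base/nfs_base.
        for flexvol in flexvols:
            try:
                # wait_for_busy_snapshot() retries via @utils.retry,
                # sleeping between attempts, until the snapshot frees up.
                zapi_client.wait_for_busy_snapshot(flexvol,
                                                   cgsnapshot['id'])
                zapi_client.delete_snapshot(flexvol, cgsnapshot['id'])
            except exception.SnapshotIsBusy:
                # Still busy after the retry window: soft-delete by
                # renaming to deleted_cinder_<name>; a periodic task
                # later deletes non-busy snapshots with that prefix.
                zapi_client.mark_snapshot_for_deletion(flexvol,
                                                       cgsnapshot['id'])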

This change also reduces lines of code by introducing a manager for
creating FixedIntervalLoopingCall instances in the ONTAP drivers. The
manager starts all registered looping calls only after the driver has
been properly initialized, and it makes it easy to ensure that
FixedIntervalLoopingCall instances are not instantiated in unit tests.
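
A minimal usage sketch (LoopingCalls, add_task, start_tasks, and the
interval constants come from the new loopingcalls module added by this
change; the task callable is illustrative):

    from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls

    def delete_marked_snapshots():
        pass  # illustrative periodic task body

    calls = loopingcalls.LoopingCalls()
    # Run once a minute, after an initial one-minute delay.
    calls.add_task(delete_marked_snapshots,
                   loopingcalls.ONE_MINUTE,
                   loopingcalls.ONE_MINUTE)
    # Nothing runs until start_tasks() is called, typically at the end
    # of check_for_setup_error(); tests can mock
    # FixedIntervalLoopingCall in one place instead of per driver.
    calls.start_tasks()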

Closes-Bug: #1596679
Change-Id: I13096a8c94a32e68814f81900032dbcc6a4a9806
Chuck Fouts 2016-06-12 17:05:24 -04:00
parent 4d209fd966
commit 388e52ce23
21 changed files with 831 additions and 191 deletions


@@ -251,6 +251,7 @@ SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE = etree.XML("""
<name>%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
@@ -283,6 +284,39 @@ SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE = etree.XML("""
<name>%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
'snapshot_name': fake.SNAPSHOT['name'],
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>deleted_cinder_%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
'snapshot_name': fake.SNAPSHOT['name'],
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>deleted_cinder_busy_snapshot</name>
<busy>True</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>


@@ -17,6 +17,7 @@
import uuid
import ddt
from lxml import etree
import mock
import paramiko
@@ -30,6 +31,7 @@ from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
@@ -39,6 +41,7 @@ CONNECTION_INFO = {'hostname': 'hostname',
'password': 'passw0rd'}
@ddt.ddt
class NetApp7modeClientTestCase(test.TestCase):
def setUp(self):
@@ -816,3 +819,39 @@ class NetApp7modeClientTestCase(test.TestCase):
self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot,
expected_vol_name, expected_snapshot_name)
@ddt.data({
'mock_return':
fake_client.SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE,
'expected': [{
'name': client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME,
'instance_id': 'abcd-ef01-2345-6789',
'volume_name': fake.SNAPSHOT['volume_id'],
}]
}, {
'mock_return': fake_client.NO_RECORDS_RESPONSE,
'expected': [],
}, {
'mock_return':
fake_client.SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY,
'expected': [],
})
@ddt.unpack
def test_get_snapshots_marked_for_deletion(self, mock_return, expected):
api_response = netapp_api.NaElement(mock_return)
volume_list = [fake.SNAPSHOT['volume_id']]
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_snapshots_marked_for_deletion(volume_list)
api_args = {
'target-name': fake.SNAPSHOT['volume_id'],
'target-type': 'volume',
'terse': 'true',
}
self.client.send_request.assert_called_once_with(
'snapshot-list-info', api_args)
self.assertListEqual(expected, result)


@@ -591,3 +591,20 @@ class NetAppBaseClientTestCase(test.TestCase):
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
]
mock_get_snapshot.assert_has_calls(calls)
def test_rename_snapshot(self):
self.mock_object(self.client, 'send_request')
self.client.rename_snapshot(
fake.SNAPSHOT['volume_id'], fake.SNAPSHOT_NAME,
client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME)
api_args = {
'volume': fake.SNAPSHOT['volume_id'],
'current-name': fake.SNAPSHOT_NAME,
'new-name':
client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME,
}
self.client.send_request.assert_called_once_with(
'snapshot-rename', api_args)


@@ -23,6 +23,7 @@ from lxml import etree
import mock
import paramiko
import six
import time
from cinder import exception
from cinder import ssh_utils
@@ -31,6 +32,7 @@ from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp import utils as netapp_utils
@@ -3153,6 +3155,8 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertEqual(expected_prov_opts, actual_prov_opts)
def test_wait_for_busy_snapshot(self):
# Need to mock sleep as it is called by @utils.retry
self.mock_object(time, 'sleep')
mock_get_snapshot = self.mock_object(
self.client, 'get_snapshot',
mock.Mock(return_value=fake.SNAPSHOT)
@@ -3162,3 +3166,66 @@
mock_get_snapshot.assert_called_once_with(fake.FLEXVOL,
fake.SNAPSHOT_NAME)
def test_wait_for_busy_snapshot_raise_exception(self):
# Need to mock sleep as it is called by @utils.retry
self.mock_object(time, 'sleep')
BUSY_SNAPSHOT = dict(fake.SNAPSHOT)
BUSY_SNAPSHOT['busy'] = True
mock_get_snapshot = self.mock_object(
self.client, 'get_snapshot',
mock.Mock(return_value=BUSY_SNAPSHOT)
)
self.assertRaises(exception.SnapshotIsBusy,
self.client.wait_for_busy_snapshot,
fake.FLEXVOL, fake.SNAPSHOT_NAME)
calls = [
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
]
mock_get_snapshot.assert_has_calls(calls)
@ddt.data({
'mock_return':
fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE,
'expected': [{
'name': fake.SNAPSHOT_NAME,
'instance_id': 'abcd-ef01-2345-6789',
'volume_name': fake.SNAPSHOT['volume_id'],
}]
}, {
'mock_return': fake_client.NO_RECORDS_RESPONSE,
'expected': [],
})
@ddt.unpack
def test_get_snapshots_marked_for_deletion(self, mock_return, expected):
api_response = netapp_api.NaElement(mock_return)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_snapshots_marked_for_deletion()
api_args = {
'query': {
'snapshot-info': {
'name': client_base.DELETED_PREFIX + '*',
'vserver': self.vserver,
'busy': 'false'
},
},
'desired-attributes': {
'snapshot-info': {
'name': None,
'volume': None,
'snapshot-instance-uuid': None,
}
},
}
self.client.send_request.assert_called_once_with(
'snapshot-get-iter', api_args)
self.assertListEqual(expected, result)


@@ -119,9 +119,12 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = ['open1', 'open2']
mock_add_looping_tasks = self.mock_object(
self.library, '_add_looping_tasks')
self.library.check_for_setup_error()
mock_add_looping_tasks.assert_called_once_with()
super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
@@ -746,3 +749,18 @@
mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_add_looping_tasks(self):
mock_super_add_looping_tasks = self.mock_object(
block_base.NetAppBlockStorageLibrary, '_add_looping_tasks')
self.library._add_looping_tasks()
mock_super_add_looping_tasks.assert_called_once_with()
def test_get_backing_flexvol_names(self):
self.library.volume_list = ['vol0', 'vol1', 'vol2']
result = self.library._get_backing_flexvol_names()
self.assertEqual('vol2', result[2])


@@ -28,7 +28,6 @@ import uuid
import ddt
import mock
from oslo_log import versionutils
from oslo_service import loopingcall
from oslo_utils import units
import six
@@ -39,6 +38,7 @@ from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
@@ -769,44 +769,31 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.library.do_setup(mock.Mock())
self.zapi_client.get_lun_list.return_value = ['lun1']
self.library._extract_and_populate_luns = mock.Mock()
mock_start_periodic_tasks = self.mock_object(
self.library, '_start_periodic_tasks')
mock_looping_start_tasks = self.mock_object(
self.library.loopingcalls, 'start_tasks')
self.library.check_for_setup_error()
self.library._extract_and_populate_luns.assert_called_once_with(
['lun1'])
mock_start_periodic_tasks.assert_called_once_with()
mock_looping_start_tasks.assert_called_once_with()
@mock.patch.object(na_utils, 'check_flags', mock.Mock())
def test_check_for_setup_error_no_os_host(self):
mock_start_tasks = self.mock_object(
self.library.loopingcalls, 'start_tasks')
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.do_setup(mock.Mock())
self.zapi_client.get_lun_list.return_value = ['lun1']
self.library._extract_and_populate_luns = mock.Mock()
mock_start_periodic_tasks = self.mock_object(
self.library, '_start_periodic_tasks')
self.library.check_for_setup_error()
self.library._extract_and_populate_luns.assert_called_once_with(
['lun1'])
mock_start_periodic_tasks.assert_called_once_with()
def test_start_periodic_tasks(self):
mock_handle_housekeeping_tasks = self.mock_object(
self.library, '_handle_housekeeping_tasks')
housekeeping_periodic_task = mock.Mock()
mock_loopingcall = self.mock_object(
loopingcall, 'FixedIntervalLoopingCall',
mock.Mock(return_value=housekeeping_periodic_task))
self.library._start_periodic_tasks()
mock_loopingcall.assert_called_once_with(
mock_handle_housekeeping_tasks)
self.assertTrue(housekeeping_periodic_task.start.called)
mock_start_tasks.assert_called_once_with()
def test_delete_volume(self):
mock_delete_lun = self.mock_object(self.library, '_delete_lun')
@@ -1372,6 +1359,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_clone_lun = self.mock_object(self.library, '_clone_lun')
mock_busy = self.mock_object(
self.zapi_client, 'wait_for_busy_snapshot')
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot])
@@ -1383,6 +1372,37 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME,
source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
mock_delete_snapshot.assert_called_once_with(
fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_create_cgsnapshot_busy_snapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_extract_host = self.mock_object(
volume_utils, 'extract_host',
mock.Mock(return_value=fake.POOL_NAME))
mock_clone_lun = self.mock_object(self.library, '_clone_lun')
mock_busy = self.mock_object(
self.zapi_client, 'wait_for_busy_snapshot')
mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name'])
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
mock_mark_snapshot_for_deletion = self.mock_object(
self.zapi_client, 'mark_snapshot_for_deletion')
self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot])
mock_extract_host.assert_called_once_with(
fake.CG_VOLUME['host'], level='pool')
self.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_lun.assert_called_once_with(
fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME,
source_snapshot=fake.CG_SNAPSHOT_ID)
mock_delete_snapshot.assert_not_called()
mock_mark_snapshot_for_deletion.assert_called_once_with(
fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_cgsnapshot(self):
@@ -1500,3 +1520,37 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
def test_add_looping_tasks(self):
mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task')
mock_call = self.mock_object(
self.library, '_delete_snapshots_marked_for_deletion')
self.library._add_looping_tasks()
mock_add_task.assert_called_once_with(
mock_call,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
def test_delete_snapshots_marked_for_deletion(self):
snapshots = [{
'name': fake.SNAPSHOT_NAME,
'volume_name': fake.VOLUME['name']
}]
mock_get_backing_flexvol_names = self.mock_object(
self.library, '_get_backing_flexvol_names')
mock_get_backing_flexvol_names.return_value = [fake.VOLUME['name']]
mock_get_snapshots_marked = self.mock_object(
self.zapi_client, 'get_snapshots_marked_for_deletion')
mock_get_snapshots_marked.return_value = snapshots
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
self.library._delete_snapshots_marked_for_deletion()
mock_get_backing_flexvol_names.assert_called_once_with()
mock_get_snapshots_marked.assert_called_once_with(
[fake.VOLUME['name']])
mock_delete_snapshot.assert_called_once_with(
fake.VOLUME['name'], fake.SNAPSHOT_NAME)


@@ -20,7 +20,6 @@ Mock unit tests for the NetApp block storage C-mode library
import ddt
import mock
from oslo_service import loopingcall
from cinder import exception
from cinder import test
@@ -34,6 +33,7 @@ from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils
from cinder.volume.drivers.netapp import utils as na_utils
@@ -104,21 +104,28 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
mock_check_api_permissions = self.mock_object(
self.library.ssc_library, 'check_api_permissions')
mock_add_looping_tasks = self.mock_object(
self.library, '_add_looping_tasks')
mock_get_pool_map = self.mock_object(
self.library, '_get_flexvol_to_pool_map',
mock.Mock(return_value={'fake_map': None}))
mock_add_looping_tasks = self.mock_object(
self.library, '_add_looping_tasks')
self.library.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_api_permissions.assert_called_once_with()
self.assertEqual(1, mock_add_looping_tasks.call_count)
mock_get_pool_map.assert_called_once_with()
mock_add_looping_tasks.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
self.mock_object(block_base.NetAppBlockStorageLibrary,
'check_for_setup_error')
mock_check_api_permissions = self.mock_object(
self.library.ssc_library, 'check_api_permissions')
self.mock_object(self.library, '_add_looping_tasks')
self.mock_object(
self.library, '_get_flexvol_to_pool_map',
mock.Mock(return_value={}))
@@ -128,25 +135,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
mock_check_api_permissions.assert_called_once_with()
def test_start_periodic_tasks(self):
mock_update_ssc = self.mock_object(
self.library, '_update_ssc')
super_start_periodic_tasks = self.mock_object(
block_base.NetAppBlockStorageLibrary, '_start_periodic_tasks')
update_ssc_periodic_task = mock.Mock()
mock_loopingcall = self.mock_object(
loopingcall, 'FixedIntervalLoopingCall',
mock.Mock(return_value=update_ssc_periodic_task))
self.library._start_periodic_tasks()
mock_loopingcall.assert_called_once_with(mock_update_ssc)
self.assertTrue(update_ssc_periodic_task.start.called)
mock_update_ssc.assert_called_once_with()
super_start_periodic_tasks.assert_called_once_with()
@ddt.data({'replication_enabled': True, 'failed_over': False},
{'replication_enabled': True, 'failed_over': True},
{'replication_enabled': False, 'failed_over': False})
@@ -158,12 +146,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
mock.Mock(return_value=fake_utils.SSC.keys()))
self.library.replication_enabled = replication_enabled
self.library.failed_over = failed_over
super_handle_housekeeping_tasks = self.mock_object(
block_base.NetAppBlockStorageLibrary, '_handle_housekeeping_tasks')
self.library._handle_housekeeping_tasks()
super_handle_housekeeping_tasks.assert_called_once_with()
(self.zapi_client.remove_unused_qos_policy_groups.
assert_called_once_with())
if replication_enabled and not failed_over:
@@ -706,3 +691,31 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertEqual('dev1', self.library.failed_over_backend_name)
self.assertEqual('dev1', actual_active)
self.assertEqual([], vol_updates)
def test_add_looping_tasks(self):
mock_update_ssc = self.mock_object(self.library, '_update_ssc')
mock_remove_unused_qos_policy_groups = self.mock_object(
self.zapi_client, 'remove_unused_qos_policy_groups')
mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task')
mock_super_add_looping_tasks = self.mock_object(
block_base.NetAppBlockStorageLibrary, '_add_looping_tasks')
self.library._add_looping_tasks()
mock_update_ssc.assert_called_once_with()
mock_add_task.assert_has_calls([
mock.call(mock_update_ssc,
loopingcalls.ONE_HOUR,
loopingcalls.ONE_HOUR),
mock.call(mock_remove_unused_qos_policy_groups,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)])
mock_super_add_looping_tasks.assert_called_once_with()
def test_get_backing_flexvol_names(self):
mock_ssc_library = self.mock_object(
self.library.ssc_library, 'get_ssc')
self.library._get_backing_flexvol_names()
mock_ssc_library.assert_called_once_with()


@@ -25,6 +25,7 @@ from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import utils as na_utils
@@ -196,9 +197,37 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
]
hosts = [snap['volume']['host'] for snap in snapshots]
flexvols = self.driver._get_backing_flexvol_names(hosts)
flexvols = self.driver._get_flexvol_names_from_hosts(hosts)
self.assertEqual(3, len(flexvols))
self.assertIn('volume1', flexvols)
self.assertIn('volume2', flexvols)
self.assertIn('volume3', flexvols)
def test_check_for_setup_error(self):
mock_get_ontapi_version = self.mock_object(
self.driver.zapi_client, 'get_ontapi_version')
mock_get_ontapi_version.return_value = ['1', '10']
mock_add_looping_tasks = self.mock_object(
self.driver, '_add_looping_tasks')
mock_super_check_for_setup_error = self.mock_object(
nfs_base.NetAppNfsDriver, 'check_for_setup_error')
self.driver.check_for_setup_error()
mock_get_ontapi_version.assert_called_once_with()
mock_add_looping_tasks.assert_called_once_with()
mock_super_check_for_setup_error.assert_called_once_with()
def test_add_looping_tasks(self):
mock_super_add_looping_tasks = self.mock_object(
nfs_base.NetAppNfsDriver, '_add_looping_tasks')
self.driver._add_looping_tasks()
mock_super_add_looping_tasks.assert_called_once_with()
def test_get_backing_flexvol_names(self):
result = self.driver._get_backing_flexvol_names()
self.assertEqual('path', result[0])


@@ -25,7 +25,6 @@ import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils
from oslo_service import loopingcall
from oslo_utils import units
import shutil
@@ -38,6 +37,7 @@ from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
from cinder.volume.drivers import remotefs
@@ -99,33 +99,6 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.assertEqual(expected_reserved_percentage,
round(result['reserved_percentage']))
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
nfs.NfsDriver, 'check_for_setup_error')
mock_start_periodic_tasks = self.mock_object(
self.driver, '_start_periodic_tasks')
self.driver.check_for_setup_error()
super_check_for_setup_error.assert_called_once_with()
mock_start_periodic_tasks.assert_called_once_with()
def test_start_periodic_tasks(self):
mock_handle_housekeeping_tasks = self.mock_object(
self.driver, '_handle_housekeeping_tasks')
housekeeping_periodic_task = mock.Mock()
mock_loopingcall = self.mock_object(
loopingcall, 'FixedIntervalLoopingCall',
mock.Mock(return_value=housekeeping_periodic_task))
self.driver._start_periodic_tasks()
mock_loopingcall.assert_called_once_with(
mock_handle_housekeeping_tasks)
self.assertTrue(housekeeping_periodic_task.start.called)
def test_get_capacity_info_ipv4_share(self):
expected = fake.CAPACITY_VALUES
get_capacity = self.driver.zapi_client.get_flexvol_capacity
@@ -402,36 +375,47 @@
self.driver._update_volume_stats)
def test_copy_image_to_volume_base_exception(self):
updates = {
'name': fake.VOLUME_NAME,
'id': fake.VOLUME_ID,
'provider_location': fake.PROVIDER_LOCATION,
}
mock_info_log = self.mock_object(nfs_base.LOG, 'info')
fake_vol = fake_volume.fake_volume_obj(self.ctxt, **updates)
self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume',
mock.Mock(side_effect=exception.NfsException))
self.assertRaises(exception.NfsException,
self.driver.copy_image_to_volume,
'fake_context', fake_vol,
'fake_context', fake.NFS_VOLUME,
'fake_img_service', fake.IMAGE_FILE_ID)
mock_info_log.assert_not_called()
@ddt.data(None, Exception)
def test_copy_image_to_volume(self, exc):
def test_copy_image_to_volume(self):
mock_log = self.mock_object(nfs_base, 'LOG')
mock_copy_image = self.mock_object(
remotefs.RemoteFSDriver, 'copy_image_to_volume')
mock_register_image = self.mock_object(
self.driver, '_register_image_in_cache')
self.driver.copy_image_to_volume('fake_context',
fake.NFS_VOLUME,
'fake_img_service',
fake.IMAGE_FILE_ID)
mock_copy_image.assert_called_once_with(
'fake_context', fake.NFS_VOLUME, 'fake_img_service',
fake.IMAGE_FILE_ID)
self.assertEqual(1, mock_log.info.call_count)
mock_register_image.assert_called_once_with(
fake.NFS_VOLUME, fake.IMAGE_FILE_ID)
@ddt.data(None, Exception)
def test__register_image_in_cache(self, exc):
mock_log = self.mock_object(nfs_base, 'LOG')
self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume')
self.mock_object(self.driver, '_do_clone_rel_img_cache',
mock.Mock(side_effect=exc))
retval = self.driver.copy_image_to_volume(
'fake_context', fake.NFS_VOLUME, 'fake_img_service',
fake.IMAGE_FILE_ID)
retval = self.driver._register_image_in_cache(
fake.NFS_VOLUME, fake.IMAGE_FILE_ID)
self.assertIsNone(retval)
self.assertEqual(exc is not None, mock_log.warning.called)
self.assertEqual(2, mock_log.info.call_count)
self.assertEqual(1, mock_log.info.call_count)
@ddt.data(True, False)
def test_do_clone_rel_img_cache(self, path_exists):
@@ -975,7 +959,10 @@
def test_create_consistencygroup_from_src(self):
mock_create_volume_from_snapshot = self.mock_object(
self.driver, 'create_volume_from_snapshot')
self.driver, 'create_volume_from_snapshot',
mock.Mock(return_value={
'provider_location': fake.PROVIDER_LOCATION
}))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
@@ -985,11 +972,15 @@
mock_create_volume_from_snapshot.assert_called_once_with(
fake.VOLUME, fake.SNAPSHOT)
self.assertIsNone(model_update)
self.assertIsNone(volumes_model_update)
expected_update = [{
'id': fake.VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
self.assertEqual(expected_update, volumes_model_update)
def test_create_consistencygroup_from_src_source_vols(self):
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_backing_flexvol_names')
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
@@ -1001,21 +992,25 @@
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.CONSISTENCY_GROUP,
source_vols=[fake.CG_VOLUME]))
source_vols=[fake.NFS_VOLUME]))
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.CG_VOLUME['host']])
[fake.NFS_VOLUME['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake_snapshot_name)
mock_clone_backing_file.assert_called_once_with(
fake.CG_VOLUME['name'], fake.VOLUME['name'], fake.CG_VOLUME['id'],
source_snapshot=fake_snapshot_name)
fake.NFS_VOLUME['name'], fake.VOLUME['name'],
fake.NFS_VOLUME['id'], source_snapshot=fake_snapshot_name)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.assertIsNone(model_update)
self.assertIsNone(volumes_model_update)
expected_update = [{
'id': fake.NFS_VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
self.assertEqual(expected_update, volumes_model_update)
def test_create_consistencygroup_from_src_invalid_parms(self):
@@ -1029,7 +1024,7 @@
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_backing_flexvol_names')
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
@@ -1051,6 +1046,36 @@
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_create_cgsnapshot_busy_snapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name'])
mock_mark_snapshot_for_deletion = self.mock_object(
self.zapi_client, 'mark_snapshot_for_deletion')
self.driver.create_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot])
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
self.driver.zapi_client.delete_snapshot.assert_not_called()
mock_mark_snapshot_for_deletion.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_consistencygroup_volume_delete_failure(self):
self.mock_object(self.driver, '_delete_file',
mock.Mock(side_effect=Exception))
@@ -1072,3 +1097,48 @@
self.assertEqual('deleted', volumes[0]['status'])
mock_delete_file.assert_called_once_with(
fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME)
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
nfs.NfsDriver, 'check_for_setup_error')
mock_start_tasks = self.mock_object(
self.driver.loopingcalls, 'start_tasks')
self.driver.check_for_setup_error()
super_check_for_setup_error.assert_called_once_with()
mock_start_tasks.assert_called_once_with()
def test_add_looping_tasks(self):
mock_add_task = self.mock_object(self.driver.loopingcalls, 'add_task')
mock_call = self.mock_object(
self.driver, '_delete_snapshots_marked_for_deletion')
self.driver._add_looping_tasks()
mock_add_task.assert_called_once_with(
mock_call,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
def test_delete_snapshots_marked_for_deletion(self):
snapshots = [{
'name': fake.SNAPSHOT_NAME,
'volume_name': fake.VOLUME['name']
}]
mock_get_flexvol_names = self.mock_object(
self.driver, '_get_backing_flexvol_names')
mock_get_flexvol_names.return_value = [fake.VOLUME['name']]
mock_get_snapshots_marked = self.mock_object(
self.zapi_client, 'get_snapshots_marked_for_deletion')
mock_get_snapshots_marked.return_value = snapshots
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
self.driver._delete_snapshots_marked_for_deletion()
mock_get_flexvol_names.assert_called_once_with()
mock_get_snapshots_marked.assert_called_once_with(
[fake.VOLUME['name']])
mock_delete_snapshot.assert_called_once_with(
fake.VOLUME['name'], fake.SNAPSHOT_NAME)


@@ -19,7 +19,6 @@ Mock unit tests for the NetApp cmode nfs storage driver
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_service import loopingcall
from oslo_utils import units
from cinder import exception
@@ -36,6 +35,7 @@ from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
@@ -384,30 +384,15 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
nfs_base.NetAppNfsDriver, 'check_for_setup_error')
mock_check_api_permissions = self.mock_object(
self.driver.ssc_library, 'check_api_permissions')
mock_add_looping_tasks = self.mock_object(
self.driver, '_add_looping_tasks')
self.driver.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_api_permissions.assert_called_once_with()
def test_start_periodic_tasks(self):
mock_update_ssc = self.mock_object(
self.driver, '_update_ssc')
super_start_periodic_tasks = self.mock_object(
nfs_base.NetAppNfsDriver, '_start_periodic_tasks')
update_ssc_periodic_task = mock.Mock()
mock_loopingcall = self.mock_object(
loopingcall, 'FixedIntervalLoopingCall',
mock.Mock(return_value=update_ssc_periodic_task))
self.driver._start_periodic_tasks()
mock_loopingcall.assert_called_once_with(mock_update_ssc)
self.assertTrue(update_ssc_periodic_task.start.called)
mock_update_ssc.assert_called_once_with()
super_start_periodic_tasks.assert_called_once_with()
self.assertEqual(1, mock_add_looping_tasks.call_count)
mock_add_looping_tasks.assert_called_once_with()
@ddt.data({'replication_enabled': True, 'failed_over': False},
{'replication_enabled': True, 'failed_over': True},
@@ -420,12 +405,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock.Mock(return_value=fake_ssc.SSC.keys()))
self.driver.replication_enabled = replication_enabled
self.driver.failed_over = failed_over
super_handle_housekeeping_tasks = self.mock_object(
nfs_base.NetAppNfsDriver, '_handle_housekeeping_tasks')
self.driver._handle_housekeeping_tasks()
super_handle_housekeeping_tasks.assert_called_once_with()
(self.driver.zapi_client.remove_unused_qos_policy_groups.
assert_called_once_with())
if replication_enabled and not failed_over:
@@ -909,6 +891,26 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])
def test_add_looping_tasks(self):
mock_update_ssc = self.mock_object(self.driver, '_update_ssc')
mock_remove_unused_qos_policy_groups = self.mock_object(
self.driver.zapi_client, 'remove_unused_qos_policy_groups')
mock_add_task = self.mock_object(self.driver.loopingcalls, 'add_task')
mock_super_add_looping_tasks = self.mock_object(
nfs_base.NetAppNfsDriver, '_add_looping_tasks')
self.driver._add_looping_tasks()
mock_update_ssc.assert_called_once_with()
mock_add_task.assert_has_calls([
mock.call(mock_update_ssc,
loopingcalls.ONE_HOUR,
loopingcalls.ONE_HOUR),
mock.call(mock_remove_unused_qos_policy_groups,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)])
mock_super_add_looping_tasks.assert_called_once_with()
@ddt.data({'has_space': True, 'type_match': True, 'expected': True},
{'has_space': True, 'type_match': False, 'expected': False},
{'has_space': False, 'type_match': True, 'expected': False},
@@ -1367,10 +1369,18 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_get_ssc.return_value = ssc
hosts = [snap['volume']['host'] for snap in snapshots]
flexvols = self.driver._get_backing_flexvol_names(hosts)
flexvols = self.driver._get_flexvol_names_from_hosts(hosts)
mock_get_ssc.assert_called_once_with()
self.assertEqual(3, len(flexvols))
self.assertIn('volume1', flexvols)
self.assertIn('volume2', flexvols)
self.assertIn('volume3', flexvols)
def test_get_backing_flexvol_names(self):
mock_ssc_library = self.mock_object(
self.driver.ssc_library, 'get_ssc')
self.driver._get_backing_flexvol_names()
mock_ssc_library.assert_called_once_with()


@@ -0,0 +1,63 @@
# Copyright (c) 2016 Chuck Fouts. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_service import loopingcall
from cinder import test
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
class LoopingCallsTestCase(test.TestCase):
def setUp(self):
super(LoopingCallsTestCase, self).setUp()
self.mock_first_looping_task = mock.Mock()
self.mock_second_looping_task = mock.Mock()
self.mock_loopingcall = self.mock_object(
loopingcall,
'FixedIntervalLoopingCall',
mock.Mock(side_effect=[self.mock_first_looping_task,
self.mock_second_looping_task])
)
self.loopingcalls = loopingcalls.LoopingCalls()
def test_add_task(self):
interval = 3600
initial_delay = 5
self.loopingcalls.add_task(self.mock_first_looping_task, interval)
self.loopingcalls.add_task(
self.mock_second_looping_task, interval, initial_delay)
self.assertEqual(2, len(self.loopingcalls.tasks))
self.assertEqual(interval, self.loopingcalls.tasks[0].interval)
self.assertEqual(initial_delay,
self.loopingcalls.tasks[1].initial_delay)
def test_start_tasks(self):
interval = 3600
initial_delay = 5
self.loopingcalls.add_task(self.mock_first_looping_task, interval)
self.loopingcalls.add_task(
self.mock_second_looping_task, interval, initial_delay)
self.loopingcalls.start_tasks()
self.mock_first_looping_task.start.assert_called_once_with(
interval, 0)
self.mock_second_looping_task.start.assert_called_once_with(
interval, initial_delay)


@@ -119,8 +119,13 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
'Ensure that the configuration option '
'netapp_pool_name_search_pattern is set correctly.')
raise exception.NetAppDriverException(msg)
self._add_looping_tasks()
super(NetAppBlockStorage7modeLibrary, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
super(NetAppBlockStorage7modeLibrary, self)._add_looping_tasks()
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
@@ -444,3 +449,7 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
return (super(NetAppBlockStorage7modeLibrary, self)
._get_preferred_target_from_list(target_details_list))
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
return self.volume_list or []


@@ -32,7 +32,6 @@ import uuid
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
@@ -41,13 +40,13 @@ from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes
class NetAppLun(object):
@@ -118,6 +117,7 @@ class NetAppBlockStorageLibrary(object):
self.max_over_subscription_ratio = (
self.configuration.max_over_subscription_ratio)
self.reserved_percentage = self._get_reserved_percentage()
self.loopingcalls = loopingcalls.LoopingCalls()
def _get_reserved_percentage(self):
# If the legacy config option is set to the default
@@ -170,21 +170,26 @@ class NetAppBlockStorageLibrary(object):
lun_list = self.zapi_client.get_lun_list()
self._extract_and_populate_luns(lun_list)
LOG.debug("Success getting list of LUNs from server.")
self.loopingcalls.start_tasks()
self._start_periodic_tasks()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval.
def _start_periodic_tasks(self):
"""Start recurring tasks common to all Data ONTAP block drivers."""
Inheriting class overrides and then explicitly calls this method.
"""
# Add the task that deletes snapshots marked for deletion.
self.loopingcalls.add_task(
self._delete_snapshots_marked_for_deletion,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
# Start the task that runs other housekeeping tasks, such as deletion
# of previously soft-deleted storage artifacts.
housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._handle_housekeeping_tasks)
housekeeping_periodic_task.start(
interval=HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
def _delete_snapshots_marked_for_deletion(self):
volume_list = self._get_backing_flexvol_names()
snapshots = self.zapi_client.get_snapshots_marked_for_deletion(
volume_list)
for snapshot in snapshots:
self.zapi_client.delete_snapshot(
snapshot['volume_name'], snapshot['name'])
def get_pool(self, volume):
"""Return pool name where volume resides.
@@ -1081,8 +1086,14 @@
source_snapshot=cgsnapshot['id'])
for flexvol in flexvols:
self.zapi_client.wait_for_busy_snapshot(flexvol, cgsnapshot['id'])
self.zapi_client.delete_snapshot(flexvol, cgsnapshot['id'])
try:
self.zapi_client.wait_for_busy_snapshot(
flexvol, cgsnapshot['id'])
self.zapi_client.delete_snapshot(
flexvol, cgsnapshot['id'])
except exception.SnapshotIsBusy:
self.zapi_client.mark_snapshot_for_deletion(
flexvol, cgsnapshot['id'])
return None, None
@@ -1127,3 +1138,7 @@
self._clone_source_to_destination(source, volume)
return None, None
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
raise NotImplementedError()


@@ -25,7 +25,6 @@ Volume driver library for NetApp C-mode block storage systems.
"""
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
@@ -36,13 +35,13 @@ from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly
@six.add_metaclass(utils.TraceWrapperMetaclass)
@@ -103,29 +102,36 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
'Ensure that the configuration option '
'netapp_pool_name_search_pattern is set correctly.')
raise exception.NetAppDriverException(msg)
self._add_looping_tasks()
super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
def _start_periodic_tasks(self):
"""Start recurring tasks for NetApp cDOT block drivers."""
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
# Note(cknight): Run the task once in the current thread to prevent a
# Note(cknight): Run the update once in the current thread to prevent a
# race with the first invocation of _update_volume_stats.
self._update_ssc()
# Start the task that updates the slow-changing storage service catalog
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc)
ssc_periodic_task.start(
interval=SSC_UPDATE_INTERVAL_SECONDS,
initial_delay=SSC_UPDATE_INTERVAL_SECONDS)
# Add the task that updates the slow-changing storage service catalog
self.loopingcalls.add_task(self._update_ssc,
loopingcalls.ONE_HOUR,
loopingcalls.ONE_HOUR)
super(NetAppBlockStorageCmodeLibrary, self)._start_periodic_tasks()
# Add the task that harvests soft-deleted QoS policy groups.
self.loopingcalls.add_task(
self.zapi_client.remove_unused_qos_policy_groups,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
self.loopingcalls.add_task(
self._handle_housekeeping_tasks,
loopingcalls.TEN_MINUTES,
0)
super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks()
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
(super(NetAppBlockStorageCmodeLibrary, self).
_handle_housekeeping_tasks())
# Harvest soft-deleted QoS policy groups
self.zapi_client.remove_unused_qos_policy_groups()
@@ -412,3 +418,7 @@
"""Failover a backend to a secondary replication target."""
return self._failover_host(volumes, secondary_id=secondary_id)
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
return self.ssc_library.get_ssc().keys()


@@ -549,3 +549,40 @@ class Client(client_base.Client):
raise exception.SnapshotNotFound(snapshot_id=snapshot_name)
return snapshot
def get_snapshots_marked_for_deletion(self, volume_list=None):
"""Get a list of snapshots marked for deletion."""
snapshots = []
for volume_name in volume_list:
api_args = {
'target-name': volume_name,
'target-type': 'volume',
'terse': 'true',
}
result = self.send_request('snapshot-list-info', api_args)
snapshots.extend(
self._parse_snapshot_list_info_result(result, volume_name))
return snapshots
def _parse_snapshot_list_info_result(self, result, volume_name):
snapshots = []
snapshots_elem = result.get_child_by_name(
'snapshots') or netapp_api.NaElement('none')
snapshot_info_list = snapshots_elem.get_children()
for snapshot_info in snapshot_info_list:
snapshot_name = snapshot_info.get_child_content('name')
snapshot_busy = strutils.bool_from_string(
snapshot_info.get_child_content('busy'))
snapshot_id = snapshot_info.get_child_content(
'snapshot-instance-uuid')
if (not snapshot_busy and
snapshot_name.startswith(client_base.DELETED_PREFIX)):
snapshots.append({
'name': snapshot_name,
'instance_id': snapshot_id,
'volume_name': volume_name,
})
return snapshots


@@ -34,6 +34,8 @@ from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
@@ -457,3 +459,17 @@ class Client(object):
"for volume clone dependency to clear.",
{"snap": snapshot_name, "vol": flexvol})
raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
def mark_snapshot_for_deletion(self, volume, snapshot_name):
"""Mark snapshot for deletion by renaming snapshot."""
return self.rename_snapshot(
volume, snapshot_name, DELETED_PREFIX + snapshot_name)
def rename_snapshot(self, volume, current_name, new_name):
"""Renames a snapshot."""
api_args = {
'volume': volume,
'current-name': current_name,
'new-name': new_name,
}
return self.send_request('snapshot-rename', api_args)


@@ -34,7 +34,6 @@ from oslo_utils import strutils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
DEFAULT_MAX_PAGE_LENGTH = 50
@@ -546,7 +545,7 @@
# matching that pattern.
if spec is not None:
current_name = spec['policy_name']
new_name = DELETED_PREFIX + current_name
new_name = client_base.DELETED_PREFIX + current_name
try:
self.qos_policy_group_rename(current_name, new_name)
except netapp_api.NaApiError as ex:
@@ -562,7 +561,7 @@
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': '%s*' % DELETED_PREFIX,
'policy-group': '%s*' % client_base.DELETED_PREFIX,
'vserver': self.vserver,
}
},
@@ -1450,6 +1449,51 @@
return counter_data
def get_snapshots_marked_for_deletion(self, volume_list=None):
"""Get a list of snapshots marked for deletion.
:param volume_list: placeholder parameter to match 7mode client method
signature.
"""
api_args = {
'query': {
'snapshot-info': {
'name': client_base.DELETED_PREFIX + '*',
'vserver': self.vserver,
'busy': 'false',
},
},
'desired-attributes': {
'snapshot-info': {
'name': None,
'volume': None,
'snapshot-instance-uuid': None,
}
},
}
result = self.send_request('snapshot-get-iter', api_args)
snapshots = []
attributes = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
snapshot_info_list = attributes.get_children()
for snapshot_info in snapshot_info_list:
snapshot_name = snapshot_info.get_child_content('name')
snapshot_id = snapshot_info.get_child_content(
'snapshot-instance-uuid')
snapshot_volume = snapshot_info.get_child_content('volume')
snapshots.append({
'name': snapshot_name,
'instance_id': snapshot_id,
'volume_name': snapshot_volume,
})
return snapshots
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
api_args = {


@@ -80,8 +80,13 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
else:
msg = _("Data ONTAP API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
self._add_looping_tasks()
super(NetApp7modeNfsDriver, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
super(NetApp7modeNfsDriver, self)._add_looping_tasks()
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False,
@@ -223,7 +228,17 @@
# 7-mode DOT does not support QoS.
return
def _get_backing_flexvol_names(self, hosts):
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
flexvol_names = []
for nfs_share in self._mounted_shares:
flexvol_name = nfs_share.rsplit('/', 1)[1]
flexvol_names.append(flexvol_name)
LOG.debug("Found flexvol %s", flexvol_name)
return flexvol_names
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
for host in hosts:


@@ -32,7 +32,6 @@ import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import urllib
@@ -42,6 +41,7 @@ from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
@@ -83,6 +83,7 @@ class NetAppNfsDriver(driver.ManageableVD,
self.configuration.append_config_values(na_opts.netapp_img_cache_opts)
self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts)
self.backend_name = self.host.split('@')[1]
self.loopingcalls = loopingcalls.LoopingCalls()
def do_setup(self, context):
super(NetAppNfsDriver, self).do_setup(context)
@@ -93,20 +94,26 @@
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
super(NetAppNfsDriver, self).check_for_setup_error()
self._start_periodic_tasks()
self.loopingcalls.start_tasks()
def _start_periodic_tasks(self):
"""Start recurring tasks common to all Data ONTAP NFS drivers."""
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval.
# Start the task that runs other housekeeping tasks, such as deletion
# of previously soft-deleted storage artifacts.
housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._handle_housekeeping_tasks)
housekeeping_periodic_task.start(
interval=HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
Inheriting class overrides and then explicitly calls this method.
"""
# Add the task that deletes snapshots marked for deletion.
self.loopingcalls.add_task(
self._delete_snapshots_marked_for_deletion,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
def _delete_snapshots_marked_for_deletion(self):
volume_list = self._get_backing_flexvol_names()
snapshots = self.zapi_client.get_snapshots_marked_for_deletion(
volume_list)
for snapshot in snapshots:
self.zapi_client.delete_snapshot(
snapshot['volume_name'], snapshot['name'])
def get_pool(self, volume):
"""Return pool name where volume resides.
@@ -266,7 +273,11 @@
"""Clone backing file for Cinder volume."""
raise NotImplementedError()
def _get_backing_flexvol_names(self, hosts):
def _get_backing_flexvol_names(self):
"""Returns backing flexvol names."""
raise NotImplementedError()
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
raise NotImplementedError()
@@ -1083,7 +1094,7 @@
"""
hosts = [snapshot['volume']['host'] for snapshot in snapshots]
flexvols = self._get_backing_flexvol_names(hosts)
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])
@@ -1096,9 +1107,14 @@
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
try:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, cgsnapshot['id'])
self.zapi_client.delete_snapshot(flexvol_name, cgsnapshot['id'])
self.zapi_client.delete_snapshot(
flexvol_name, cgsnapshot['id'])
except exception.SnapshotIsBusy:
self.zapi_client.mark_snapshot_for_deletion(
flexvol_name, cgsnapshot['id'])
return None, None
@@ -1118,16 +1134,19 @@
"""
LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes])
model_update = None
volumes_model_update = []
if cgsnapshot:
vols = zip(volumes, snapshots)
for volume, snapshot in vols:
self.create_volume_from_snapshot(volume, snapshot)
update = self.create_volume_from_snapshot(volume, snapshot)
update['id'] = volume['id']
volumes_model_update.append(update)
elif source_cg and source_vols:
hosts = [source_vol['host'] for source_vol in source_vols]
flexvols = self._get_backing_flexvol_names(hosts)
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
snapshot_name = 'snapshot-temp-' + source_cg['id']
@@ -1139,6 +1158,10 @@
self._clone_backing_file_for_volume(
source_vol['name'], volume['name'],
source_vol['id'], source_snapshot=snapshot_name)
update = {'id': volume['id'],
'provider_location': source_vol['provider_location'],
}
volumes_model_update.append(update)
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
@@ -1151,4 +1174,4 @@
model_update = {}
model_update['status'] = 'error'
return model_update, None
return model_update, volumes_model_update


@@ -25,7 +25,6 @@ import os
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
import six
@@ -38,6 +37,7 @@ from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
@@ -45,7 +45,6 @@ from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly
@interface.volumedriver
@@ -96,28 +95,39 @@
@utils.trace_method
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
self.ssc_library.check_api_permissions()
self._add_looping_tasks()
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
def _start_periodic_tasks(self):
"""Start recurring tasks for NetApp cDOT NFS driver."""
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
# Note(cknight): Run the task once in the current thread to prevent a
# Note(cknight): Run the update once in the current thread to prevent a
# race with the first invocation of _update_volume_stats.
self._update_ssc()
# Start the task that updates the slow-changing storage service catalog
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc)
ssc_periodic_task.start(
interval=SSC_UPDATE_INTERVAL_SECONDS,
initial_delay=SSC_UPDATE_INTERVAL_SECONDS)
# Add the task that updates the slow-changing storage service catalog
self.loopingcalls.add_task(self._update_ssc,
loopingcalls.ONE_HOUR,
loopingcalls.ONE_HOUR)
super(NetAppCmodeNfsDriver, self)._start_periodic_tasks()
# Add the task that harvests soft-deleted QoS policy groups.
self.loopingcalls.add_task(
self.zapi_client.remove_unused_qos_policy_groups,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
# Add the task that runs other housekeeping tasks, such as deletion
# of previously soft-deleted storage artifacts.
self.loopingcalls.add_task(
self._handle_housekeeping_tasks,
loopingcalls.TEN_MINUTES,
0)
super(NetAppCmodeNfsDriver, self)._add_looping_tasks()
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
super(NetAppCmodeNfsDriver, self)._handle_housekeeping_tasks()
# Harvest soft-deleted QoS policy groups
self.zapi_client.remove_unused_qos_policy_groups()
@@ -676,7 +686,11 @@
return self._failover_host(volumes, secondary_id=secondary_id)
def _get_backing_flexvol_names(self, hosts):
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
return self.ssc_library.get_ssc().keys()
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
ssc = self.ssc_library.get_ssc()


@@ -0,0 +1,43 @@
# Copyright (c) 2016 Chuck Fouts. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Collects and starts tasks created from oslo_service.loopingcall.
"""
from collections import namedtuple
from oslo_service import loopingcall
LoopingTask = namedtuple('LoopingTask',
['looping_call', 'interval', 'initial_delay'])
# Time intervals in seconds
ONE_MINUTE = 60
TEN_MINUTES = 600
ONE_HOUR = 3600
class LoopingCalls(object):
def __init__(self):
self.tasks = []
def add_task(self, call_function, interval, initial_delay=0):
looping_call = loopingcall.FixedIntervalLoopingCall(call_function)
task = LoopingTask(looping_call, interval, initial_delay)
self.tasks.append(task)
def start_tasks(self):
for task in self.tasks:
task.looping_call.start(task.interval, task.initial_delay)