Rework backup process to make it async
This patch updates the backup process to call the volume manager
asynchronously to get the backup device on which to perform the backup.
This fixes a major issue with certain Cinder drivers that take a long
time to create a temporary clone of the volume being backed up.

Closes-Bug: #1916843
Change-Id: Ib861e1bc35247f932fbae3796ed9025a560461c4
parent 43ca0d6e3c
commit e38fb71aac
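As a rough illustration of the flow this patch introduces (a minimal,
runnable sketch: `BackupSvc`, `VolumeSvc`, and the queue are hypothetical
stand-ins for the backup manager, volume manager, and RPC bus, not the
real Cinder classes): create_backup now casts to the volume manager and
returns immediately, and the volume manager casts back to continue_backup
once the potentially slow backup device is ready.

import queue

bus = queue.Queue()  # stands in for the RPC bus

class VolumeSvc:
    def get_backup_device(self, backup, async_call=False):
        device = f"device-for-{backup}"  # may take minutes on real drivers
        if async_call:
            # New behavior: cast back to the backup service and return.
            bus.put(("continue_backup", backup, device))
        else:
            # Old behavior: the caller blocks until this returns.
            return device

class BackupSvc:
    def __init__(self, volume_svc):
        self.volume_svc = volume_svc

    def create_backup(self, backup):
        # Fire-and-forget: the backup RPC worker is freed immediately.
        self.volume_svc.get_backup_device(backup, async_call=True)

    def continue_backup(self, backup, device):
        print(f"backing up {backup} from {device}")
        self._finish_backup(backup)

    def _finish_backup(self, backup):
        print(f"finished {backup}")

svc = BackupSvc(VolumeSvc())
svc.create_backup("backup-1")
# Drain the "bus" the way an RPC dispatcher would.
method, backup, device = bus.get()
getattr(svc, method)(backup, device)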
@@ -342,7 +342,6 @@ class BackupManager(manager.SchedulerDependentManager):
         snapshot = objects.Snapshot.get_by_id(
             context, snapshot_id) if snapshot_id else None
         previous_status = volume.get('previous_status', None)
-        updates = {}
         context.message_resource_id = backup.id
         context.message_resource_type = message_field.Resource.VOLUME_BACKUP
         context.message_action = message_field.Action.BACKUP_CREATE
@@ -408,7 +407,9 @@ class BackupManager(manager.SchedulerDependentManager):
 
             backup.service = self.driver_name
             backup.save()
-            updates = self._run_backup(context, backup, volume)
+
+            # Start backup, then continue_backup, then finish_backup
+            self._start_backup(context, backup, volume)
         except Exception as err:
             with excutils.save_and_reraise_exception():
                 if snapshot_id:
@@ -421,42 +422,15 @@ class BackupManager(manager.SchedulerDependentManager):
                          'previous_status': 'error_backing-up'})
                 volume_utils.update_backup_error(backup, str(err))
 
-        # Restore the original status.
-        if snapshot_id:
-            self.db.snapshot_update(
-                context, snapshot_id,
-                {'status': fields.SnapshotStatus.AVAILABLE})
-        else:
-            self.db.volume_update(context, volume_id,
-                                  {'status': previous_status,
-                                   'previous_status': 'backing-up'})
-
-        # _run_backup method above updated the status for the backup, so it
-        # will reflect latest status, even if it is deleted
-        completion_msg = 'finished'
-        if backup.status in (fields.BackupStatus.DELETING,
-                             fields.BackupStatus.DELETED):
-            completion_msg = 'aborted'
-        else:
-            backup.status = fields.BackupStatus.AVAILABLE
-            backup.size = volume['size']
-
-            if updates:
-                backup.update(updates)
-                backup.save()
-
-            # Handle the num_dependent_backups of parent backup when child
-            # backup has created successfully.
-            if backup.parent_id:
-                parent_backup = objects.Backup.get_by_id(context,
-                                                         backup.parent_id)
-                parent_backup.num_dependent_backups += 1
-                parent_backup.save()
-        LOG.info('Create backup %s. backup: %s.', completion_msg, backup.id)
-        self._notify_about_backup_usage(context, backup, "create.end")
-
-    def _run_backup(self, context, backup, volume):
-        message_created = False
+    def _start_backup(self, context, backup, volume):
+        """This starts the backup process.
+
+        First we have to get the backup device from the volume manager.
+        This can take a long time to complete. Once the volume manager
+        is done creating/getting the backup device, then we get a callback
+        to complete the process of backing up the volume.
+        """
         # Save a copy of the encryption key ID in case the volume is deleted.
         if (volume.encryption_key_id is not None and
                 backup.encryption_key_id is None):
@@ -466,29 +440,26 @@ class BackupManager(manager.SchedulerDependentManager):
                 volume.encryption_key_id)
             backup.save()
 
-        backup_service = self.service(context)
+        # This is an async call to the volume manager. We will get a
+        # callback from the volume manager to continue once it's done.
+        LOG.info("Call Volume Manager to get_backup_device for %s", backup)
+        self.volume_rpcapi.get_backup_device(context, backup, volume)
+
+    def continue_backup(self, context, backup, backup_device):
+        """This is the callback from the volume manager to continue."""
+        message_created = False
+        volume_id = backup.volume_id
+        volume = objects.Volume.get_by_id(context, volume_id)
+        snapshot_id = backup.snapshot_id
+        snapshot = objects.Snapshot.get_by_id(
+            context, snapshot_id) if snapshot_id else None
+        previous_status = volume.get('previous_status', None)
+
+        backup_service = self.service(context)
         properties = volume_utils.brick_get_connector_properties()
 
         # NOTE(geguileo): Not all I/O disk operations properly do greenthread
         # context switching and may end up blocking the greenthread, so we go
         # with native threads proxy-wrapping the device file object.
+        updates = {}
         try:
-            try:
-                backup_device = self.volume_rpcapi.get_backup_device(context,
-                                                                     backup,
-                                                                     volume)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    # We set message_create to True before creating the
-                    # message because if the message create call fails
-                    # and is catched by the base/outer exception handler
-                    # then we will end up storing a wrong message
-                    message_created = True
-                    self.message_api.create_from_request_context(
-                        context,
-                        detail=
-                        message_field.Detail.BACKUP_CREATE_DEVICE_ERROR)
             try:
                 attach_info = self._attach_device(context,
                                                   backup_device.device_obj,
@@ -501,6 +472,7 @@ class BackupManager(manager.SchedulerDependentManager):
                     self.message_api.create_from_request_context(
                         context,
                         detail=message_field.Detail.ATTACH_ERROR)
 
             try:
                 device_path = attach_info['device']['path']
                 if (isinstance(device_path, str) and
@@ -540,6 +512,17 @@ class BackupManager(manager.SchedulerDependentManager):
                         context,
                         detail=
                         message_field.Detail.DETACH_ERROR)
+        except Exception as err:
+            with excutils.save_and_reraise_exception():
+                if snapshot_id:
+                    snapshot.status = fields.SnapshotStatus.AVAILABLE
+                    snapshot.save()
+                else:
+                    self.db.volume_update(
+                        context, volume_id,
+                        {'status': previous_status,
+                         'previous_status': 'error_backing-up'})
+                volume_utils.update_backup_error(backup, str(err))
         finally:
             with backup.as_read_deleted():
                 backup.refresh()
@@ -553,7 +536,47 @@ class BackupManager(manager.SchedulerDependentManager):
                         context,
                         detail=
                         message_field.Detail.BACKUP_CREATE_CLEANUP_ERROR)
-        return updates
+
+        self._finish_backup(context, backup, volume, updates)
+
+    def _finish_backup(self, context, backup, volume, updates):
+        volume_id = backup.volume_id
+        snapshot_id = backup.snapshot_id
+        previous_status = volume.get('previous_status', None)
+
+        # Restore the original status.
+        if snapshot_id:
+            self.db.snapshot_update(
+                context, snapshot_id,
+                {'status': fields.SnapshotStatus.AVAILABLE})
+        else:
+            self.db.volume_update(context, volume_id,
+                                  {'status': previous_status,
+                                   'previous_status': 'backing-up'})
+
+        # _run_backup method above updated the status for the backup, so it
+        # will reflect latest status, even if it is deleted
+        completion_msg = 'finished'
+        if backup.status in (fields.BackupStatus.DELETING,
+                             fields.BackupStatus.DELETED):
+            completion_msg = 'aborted'
+        else:
+            backup.status = fields.BackupStatus.AVAILABLE
+            backup.size = volume['size']
+
+            if updates:
+                backup.update(updates)
+                backup.save()
+
+            # Handle the num_dependent_backups of parent backup when child
+            # backup has created successfully.
+            if backup.parent_id:
+                parent_backup = objects.Backup.get_by_id(context,
+                                                         backup.parent_id)
+                parent_backup.num_dependent_backups += 1
+                parent_backup.save()
+        LOG.info('Create backup %s. backup: %s.', completion_msg, backup.id)
+        self._notify_about_backup_usage(context, backup, "create.end")
 
     def _is_our_backup(self, backup):
         # Accept strings and Service OVO
@@ -47,9 +47,10 @@ class BackupAPI(rpc.RPCAPI):
         2.0 - Remove 1.x compatibility
         2.1 - Adds set_log_levels and get_log_levels
         2.2 - Adds publish_service_capabilities
+        2.3 - Adds continue_backup call
     """
 
-    RPC_API_VERSION = '2.2'
+    RPC_API_VERSION = '2.3'
     RPC_DEFAULT_VERSION = '2.0'
     TOPIC = constants.BACKUP_TOPIC
    BINARY = 'cinder-backup'
@@ -59,6 +60,12 @@ class BackupAPI(rpc.RPCAPI):
         cctxt = self._get_cctxt(server=backup.host)
         cctxt.cast(ctxt, 'create_backup', backup=backup)
 
+    def continue_backup(self, ctxt, backup, backup_device):
+        LOG.debug("continue_backup in rpcapi backup_id %s", backup.id)
+        cctxt = self._get_cctxt(server=backup.host)
+        cctxt.cast(ctxt, 'continue_backup', backup=backup,
+                   backup_device=backup_device)
+
     def restore_backup(self, ctxt, backup_host, backup, volume_id):
         LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
         cctxt = self._get_cctxt(server=backup_host)
@@ -34,6 +34,7 @@ from cinder.backup import manager
 from cinder import context
 from cinder import db
 from cinder import exception
+from cinder.message import message_field
 from cinder import objects
 from cinder.objects import fields
 from cinder import quota
@@ -630,7 +631,7 @@ class BackupTestCase(BaseBackupTest):
         vol_id = self._create_volume_db_entry(size=1)
         backup = self._create_backup_db_entry(volume_id=vol_id)
 
-        mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup')
+        mock_run_backup = self.mock_object(self.backup_mgr, '_start_backup')
         mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4()))
         self.assertRaises(FakeBackupException,
                           self.backup_mgr.create_backup,
@@ -643,22 +644,25 @@ class BackupTestCase(BaseBackupTest):
         self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
         self.assertTrue(mock_run_backup.called)
 
-    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
-    def test_create_backup_aborted(self, run_backup_mock):
+    @mock.patch('cinder.backup.manager.BackupManager._start_backup')
+    def test_create_backup_aborted(self, start_backup_mock):
         """Test error handling when abort occurs during backup creation."""
-        def my_run_backup(*args, **kwargs):
+        def my_start_backup(*args, **kwargs):
             backup.destroy()
             with backup.as_read_deleted():
                 original_refresh()
 
-        run_backup_mock.side_effect = my_run_backup
+        start_backup_mock.side_effect = my_start_backup
         vol_id = self._create_volume_db_entry(size=1)
         backup = self._create_backup_db_entry(volume_id=vol_id)
         original_refresh = backup.refresh
 
+        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
         self.backup_mgr.create_backup(self.ctxt, backup)
+        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
+        self.backup_mgr._finish_backup(self.ctxt, backup, vol, {})
 
-        self.assertTrue(run_backup_mock.called)
+        self.assertTrue(start_backup_mock.called)
 
         vol = objects.Volume.get_by_id(self.ctxt, vol_id)
         self.assertEqual('available', vol.status)
@@ -668,9 +672,9 @@ class BackupTestCase(BaseBackupTest):
         backup.refresh()
         self.assertEqual(fields.BackupStatus.DELETED, backup.status)
 
-    @mock.patch('cinder.backup.manager.BackupManager._run_backup',
+    @mock.patch('cinder.backup.manager.BackupManager._start_backup',
                 side_effect=FakeBackupException(str(uuid.uuid4())))
-    def test_create_backup_with_snapshot_error(self, mock_run_backup):
+    def test_create_backup_with_snapshot_error(self, mock_start_backup):
         """Test error handling when error occurs during backup creation."""
         vol_id = self._create_volume_db_entry(size=1)
         snapshot = self._create_snapshot_db_entry(status='backing-up',
@@ -687,7 +691,7 @@ class BackupTestCase(BaseBackupTest):
 
         backup.refresh()
         self.assertEqual(fields.BackupStatus.ERROR, backup.status)
-        self.assertTrue(mock_run_backup.called)
+        self.assertTrue(mock_start_backup.called)
 
     @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@@ -704,7 +708,7 @@ class BackupTestCase(BaseBackupTest):
         vol = objects.Volume.get_by_id(self.ctxt, vol_id)
         backup_device_dict = {'backup_device': vol, 'secure_enabled': False,
                               'is_snapshot': False, }
-        mock_get_backup_device.return_value = (
+        mock_backup_device = (
             objects.BackupDeviceInfo.from_primitive(backup_device_dict,
                                                     self.ctxt,
                                                     ['admin_metadata',
@@ -719,6 +723,7 @@ class BackupTestCase(BaseBackupTest):
         mock_get_conn.return_value = properties
 
         self.backup_mgr.create_backup(self.ctxt, backup)
+        self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device)
 
         mock_temporary_chown.assert_called_once_with('/dev/null')
         mock_attach_device.assert_called_once_with(self.ctxt, vol,
@@ -769,6 +774,8 @@ class BackupTestCase(BaseBackupTest):
         mock_brick.return_value = properties
 
         self.backup_mgr.create_backup(self.ctxt, backup)
+        self.backup_mgr.continue_backup(self.ctxt, backup,
+                                        mock_backup_device)
 
         backup = db.backup_get(self.ctxt, backup.id)
         self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
@@ -804,6 +811,8 @@ class BackupTestCase(BaseBackupTest):
         mock_brick.return_value = properties
 
         self.backup_mgr.create_backup(self.ctxt, backup)
+        self.backup_mgr.continue_backup(self.ctxt, backup,
+                                        mock_backup_device)
 
         backup = db.backup_get(self.ctxt, backup.id)
         self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
@@ -820,6 +829,11 @@ class BackupTestCase(BaseBackupTest):
                                    mock_brick):
         vol_id = self._create_volume_db_entry()
         backup = self._create_backup_db_entry(volume_id=vol_id)
+        # These are set in create_backup, but we are calling
+        # continue_backup
+        self.ctxt.message_resource_id = backup.id
+        self.ctxt.message_resource_type = message_field.Resource.VOLUME_BACKUP
+        self.ctxt.message_action = message_field.Action.BACKUP_CREATE
 
         with mock.patch.object(self.backup_mgr, 'service') as \
                 mock_service:
@@ -838,8 +852,8 @@ class BackupTestCase(BaseBackupTest):
         mock_brick.return_value = properties
 
         self.assertRaises(FakeBackupException,
-                          self.backup_mgr.create_backup,
-                          self.ctxt, backup)
+                          self.backup_mgr.continue_backup,
+                          self.ctxt, backup, mock_backup_device)
 
         vol = db.volume_get(self.ctxt, vol_id)
         self.assertEqual('available', vol.status)
@@ -847,6 +861,7 @@ class BackupTestCase(BaseBackupTest):
         backup = db.backup_get(self.ctxt, backup.id)
         self.assertEqual(fields.BackupStatus.ERROR, backup.status)
 
+    @mock.patch('cinder.backup.manager.BackupManager._finish_backup')
     @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
     @mock.patch('cinder.utils.temporary_chown')
@@ -856,7 +871,8 @@ class BackupTestCase(BaseBackupTest):
                                     mock_open,
                                     mock_chown,
                                     mock_backup_device,
-                                    mock_brick):
+                                    mock_brick,
+                                    mock_finish):
         backup_service = mock.Mock()
         backup_service.backup = mock.Mock(
             return_value=mock.sentinel.backup_update)
@@ -872,22 +888,24 @@ class BackupTestCase(BaseBackupTest):
         self.backup_mgr._attach_device = mock.Mock(
             return_value=attach_info)
         self.backup_mgr._detach_device = mock.Mock()
-        output = self.backup_mgr._run_backup(self.ctxt, backup, volume)
+        self.backup_mgr.continue_backup(self.ctxt, backup,
+                                        mock_backup_device)
 
         mock_chown.assert_not_called()
         mock_open.assert_not_called()
         backup_service.backup.assert_called_once_with(
             backup, device_path)
-        self.assertEqual(mock.sentinel.backup_update, output)
+        mock_finish.called_once_with(self.ctxt, backup, volume,
+                                     mock.sentinel.backup_update)
 
-    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
+    @mock.patch('cinder.backup.manager.BackupManager._start_backup')
     @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'),
               (fields.SnapshotStatus.BACKING_UP, 'in-use'),
               (fields.SnapshotStatus.AVAILABLE, 'available'),
               (fields.SnapshotStatus.AVAILABLE, 'in-use'))
     @ddt.unpack
     def test_create_backup_with_snapshot(self, snapshot_status, volume_status,
-                                         mock_run_backup):
+                                         mock_start_backup):
         vol_id = self._create_volume_db_entry(status=volume_status)
         snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
                                                   status=snapshot_status)
@@ -896,6 +914,9 @@ class BackupTestCase(BaseBackupTest):
         if snapshot_status == fields.SnapshotStatus.BACKING_UP:
             self.backup_mgr.create_backup(self.ctxt, backup)
 
+            vol = objects.Volume.get_by_id(self.ctxt, vol_id)
+            self.backup_mgr._finish_backup(self.ctxt, backup, vol, {})
+
         vol = objects.Volume.get_by_id(self.ctxt, vol_id)
         snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
 
@@ -926,7 +947,7 @@ class BackupTestCase(BaseBackupTest):
         snap = self._create_snapshot_db_entry(volume_id=vol_id)
 
         vol = objects.Volume.get_by_id(self.ctxt, vol_id)
-        mock_get_backup_device.return_value = (
+        mock_backup_device = (
             objects.BackupDeviceInfo.from_primitive({
                 'backup_device': snap, 'secure_enabled': False,
                 'is_snapshot': True, },
@@ -951,6 +972,7 @@ class BackupTestCase(BaseBackupTest):
         mock_open.return_value = open('/dev/null', 'rb')
 
         self.backup_mgr.create_backup(self.ctxt, backup)
+        self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device)
         mock_temporary_chown.assert_called_once_with('/dev/null')
         mock_initialize_connection_snapshot.assert_called_once_with(
             self.ctxt, snap, properties)
@@ -1028,9 +1050,9 @@ class BackupTestCase(BaseBackupTest):
         vol_id = self._create_volume_db_entry(size=vol_size)
         backup = self._create_backup_db_entry(volume_id=vol_id)
 
-        self.mock_object(self.backup_mgr, '_run_backup')
+        self.mock_object(self.backup_mgr, '_start_backup')
         self.backup_mgr.create_backup(self.ctxt, backup)
-        self.assertEqual(2, notify.call_count)
+        self.assertEqual(1, notify.call_count)
 
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
     @mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@@ -1892,7 +1914,7 @@ class BackupTestCase(BaseBackupTest):
         self.assertEqual(1, mock_restore.call_count)
         self.assertEqual(1, mock_sem.__exit__.call_count)
 
-    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
+    @mock.patch('cinder.backup.manager.BackupManager._start_backup')
     def test_backup_max_operations_backup(self, mock_backup):
         mock_sem = self.mock_object(self.backup_mgr, '_semaphore')
         vol_id = self._create_volume_db_entry(
@@ -29,7 +29,7 @@ class BackupUserMessagesTest(test.TestCase):
     @mock.patch('cinder.db.volume_update')
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
     @mock.patch('cinder.message.api.API.create_from_request_context')
-    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
+    @mock.patch('cinder.backup.manager.BackupManager._start_backup')
     @mock.patch('cinder.backup.manager.BackupManager.is_working')
     @mock.patch('cinder.backup.manager.BackupManager.'
                 '_notify_about_backup_usage')
@@ -61,7 +61,7 @@ class BackupUserMessagesTest(test.TestCase):
     @mock.patch('cinder.db.volume_update')
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
     @mock.patch('cinder.message.api.API.create_from_request_context')
-    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
+    @mock.patch('cinder.backup.manager.BackupManager._start_backup')
     @mock.patch('cinder.backup.manager.BackupManager.is_working')
     @mock.patch('cinder.backup.manager.BackupManager.'
                 '_notify_about_backup_usage')
@@ -92,44 +92,6 @@ class BackupUserMessagesTest(test.TestCase):
             fake_context,
             detail=message_field.Detail.BACKUP_SERVICE_DOWN)
 
-    @mock.patch('cinder.db.volume_update')
-    @mock.patch('cinder.objects.volume.Volume.get_by_id')
-    @mock.patch('cinder.message.api.API.create_from_request_context')
-    @mock.patch('cinder.backup.manager.BackupManager.is_working')
-    @mock.patch('cinder.backup.manager.BackupManager.'
-                '_notify_about_backup_usage')
-    @mock.patch(
-        'cinder.backup.manager.volume_utils.brick_get_connector_properties')
-    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
-    @mock.patch('cinder.backup.manager.BackupManager.'
-                '_cleanup_temp_volumes_snapshots_when_backup_created')
-    def test_backup_create_device_error(
-            self, mock_cleanup, mock_get_bak_dev, mock_get_conn, mock_notify,
-            mock_working, mock_msg_create, mock_get_vol, mock_vol_update):
-        manager = backup_manager.BackupManager()
-        fake_context = mock.MagicMock()
-        fake_backup = mock.MagicMock(
-            id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID,
-            snapshot_id=None)
-        mock_vol = mock.MagicMock()
-        mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__
-        mock_get_vol.return_value = mock_vol
-        mock_working.return_value = True
-        mock_get_bak_dev.side_effect = exception.InvalidVolume(
-            reason="test reason")
-
-        self.assertRaises(exception.InvalidVolume, manager.create_backup,
-                          fake_context, fake_backup)
-        self.assertEqual(message_field.Action.BACKUP_CREATE,
-                         fake_context.message_action)
-        self.assertEqual(message_field.Resource.VOLUME_BACKUP,
-                         fake_context.message_resource_type)
-        self.assertEqual(fake_backup.id,
-                         fake_context.message_resource_id)
-        mock_msg_create.assert_called_with(
-            fake_context,
-            detail=message_field.Detail.BACKUP_CREATE_DEVICE_ERROR)
-
     @mock.patch('cinder.db.volume_update')
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
     @mock.patch('cinder.message.api.API.create_from_request_context')
@@ -155,16 +117,11 @@ class BackupUserMessagesTest(test.TestCase):
         mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__
         mock_get_vol.return_value = mock_vol
         mock_working.return_value = True
+        backup_device = mock.MagicMock()
         mock_attach.side_effect = exception.InvalidVolume(reason="test reason")
 
-        self.assertRaises(exception.InvalidVolume, manager.create_backup,
-                          fake_context, fake_backup)
-        self.assertEqual(message_field.Action.BACKUP_CREATE,
-                         fake_context.message_action)
-        self.assertEqual(message_field.Resource.VOLUME_BACKUP,
-                         fake_context.message_resource_type)
-        self.assertEqual(fake_backup.id,
-                         fake_context.message_resource_id)
+        self.assertRaises(exception.InvalidVolume, manager.continue_backup,
+                          fake_context, fake_backup, backup_device)
         mock_msg_create.assert_called_with(
             fake_context,
             detail=message_field.Detail.ATTACH_ERROR)
@@ -198,17 +155,12 @@ class BackupUserMessagesTest(test.TestCase):
         mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__
         mock_get_vol.return_value = mock_vol
         mock_working.return_value = True
+        backup_device = mock.MagicMock()
         mock_attach.return_value = {'device': {'path': '/dev/sdb'}}
         mock_backup.side_effect = exception.InvalidBackup(reason="test reason")
 
-        self.assertRaises(exception.InvalidBackup, manager.create_backup,
-                          fake_context, fake_backup)
-        self.assertEqual(message_field.Action.BACKUP_CREATE,
-                         fake_context.message_action)
-        self.assertEqual(message_field.Resource.VOLUME_BACKUP,
-                         fake_context.message_resource_type)
-        self.assertEqual(fake_backup.id,
-                         fake_context.message_resource_id)
+        self.assertRaises(exception.InvalidBackup, manager.continue_backup,
+                          fake_context, fake_backup, backup_device)
         mock_msg_create.assert_called_with(
             fake_context,
             detail=message_field.Detail.BACKUP_CREATE_DRIVER_ERROR)
@@ -242,17 +194,12 @@ class BackupUserMessagesTest(test.TestCase):
         mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__
         mock_get_vol.return_value = mock_vol
         mock_working.return_value = True
+        backup_device = mock.MagicMock()
         mock_attach.return_value = {'device': {'path': '/dev/sdb'}}
         mock_detach.side_effect = exception.InvalidVolume(reason="test reason")
 
-        self.assertRaises(exception.InvalidVolume, manager.create_backup,
-                          fake_context, fake_backup)
-        self.assertEqual(message_field.Action.BACKUP_CREATE,
-                         fake_context.message_action)
-        self.assertEqual(message_field.Resource.VOLUME_BACKUP,
-                         fake_context.message_resource_type)
-        self.assertEqual(fake_backup.id,
-                         fake_context.message_resource_id)
+        self.assertRaises(exception.InvalidVolume, manager.continue_backup,
+                          fake_context, fake_backup, backup_device)
         mock_msg_create.assert_called_with(
             fake_context,
             detail=message_field.Detail.DETACH_ERROR)
@@ -286,18 +233,13 @@ class BackupUserMessagesTest(test.TestCase):
         mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__
         mock_get_vol.return_value = mock_vol
         mock_working.return_value = True
+        backup_device = mock.MagicMock()
         mock_attach.return_value = {'device': {'path': '/dev/sdb'}}
         mock_cleanup.side_effect = exception.InvalidVolume(
             reason="test reason")
 
-        self.assertRaises(exception.InvalidVolume, manager.create_backup,
-                          fake_context, fake_backup)
-        self.assertEqual(message_field.Action.BACKUP_CREATE,
-                         fake_context.message_action)
-        self.assertEqual(message_field.Resource.VOLUME_BACKUP,
-                         fake_context.message_resource_type)
-        self.assertEqual(fake_backup.id,
-                         fake_context.message_resource_id)
+        self.assertRaises(exception.InvalidVolume, manager.continue_backup,
+                          fake_context, fake_backup, backup_device)
         mock_msg_create.assert_called_with(
             fake_context,
             detail=message_field.Detail.BACKUP_CREATE_CLEANUP_ERROR)
@@ -413,7 +413,24 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
                             'volume_id': self.fake_volume_obj.id})
 
     @ddt.data(None, 'mycluster')
-    def test_get_backup_device(self, cluster_name):
+    def test_get_backup_device_cast(self, cluster_name):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
+        self._test_rpc_api('get_backup_device',
+                           rpc_method='cast',
+                           server=cluster_name or self.fake_volume_obj.host,
+                           backup=self.fake_backup_obj,
+                           volume=self.fake_volume_obj,
+                           expected_kwargs_diff={
+                               'want_objects': True,
+                               'async_call': True,
+                           },
+                           retval=None,
+                           version='3.17')
+
+    @ddt.data(None, 'mycluster')
+    def test_get_backup_device_call(self, cluster_name):
+        self.can_send_version_mock.side_effect = (False, False, True, False,
+                                                  True)
         self._change_cluster_name(self.fake_volume_obj, cluster_name)
         backup_device_dict = {'backup_device': self.fake_volume,
                               'is_snapshot': False,
@@ -433,7 +450,8 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
 
     @ddt.data(None, 'mycluster')
     def test_get_backup_device_old(self, cluster_name):
-        self.can_send_version_mock.side_effect = (True, False, False)
+        self.can_send_version_mock.side_effect = (False, False, False, False,
+                                                  False)
         self._change_cluster_name(self.fake_volume_obj, cluster_name)
         backup_device_dict = {'backup_device': self.fake_volume,
                               'is_snapshot': False,
@@ -57,6 +57,7 @@ profiler = importutils.try_import('osprofiler.profiler')
 import requests
 from taskflow import exceptions as tfe
 
+from cinder.backup import rpcapi as backup_rpcapi
 from cinder.common import constants
 from cinder import compute
 from cinder import context
@@ -4736,7 +4737,8 @@ class VolumeManager(manager.CleanableManager,
     def get_backup_device(self,
                           ctxt: context.RequestContext,
                           backup: objects.Backup,
-                          want_objects: bool = False):
+                          want_objects: bool = False,
+                          async_call: bool = False):
         (backup_device, is_snapshot) = (
             self.driver.get_backup_device(ctxt, backup))
         secure_enabled = self.driver.secure_file_operations_enabled()
@@ -4745,9 +4747,20 @@ class VolumeManager(manager.CleanableManager,
                               'is_snapshot': is_snapshot, }
         # TODO(sborkows): from_primitive method will be removed in O, so there
         # is a need to clean here then.
-        return (objects.BackupDeviceInfo.from_primitive(backup_device_dict,
-                                                        ctxt)
-                if want_objects else backup_device_dict)
+        backup_device = (objects.BackupDeviceInfo.from_primitive(
+            backup_device_dict, ctxt)
+            if want_objects else backup_device_dict)
+
+        if async_call:
+            # we have to use an rpc call back to the backup manager to
+            # continue the backup
+            LOG.info("Calling backup continue_backup for: %s", backup)
+            rpcapi = backup_rpcapi.BackupAPI()
+            rpcapi.continue_backup(ctxt, backup, backup_device)
+        else:
+            # The rpc api version doesn't support the async callback
+            # so we fallback to returning the value itself.
+            return backup_device
 
     def secure_file_operations_enabled(
             self,
@@ -136,9 +136,10 @@ class VolumeAPI(rpc.RPCAPI):
                failover_replication, and list_replication_targets.
         3.15 - Add revert_to_snapshot method
         3.16 - Add no_snapshots to accept_transfer method
+        3.17 - Make get_backup_device a cast (async)
     """
 
-    RPC_API_VERSION = '3.16'
+    RPC_API_VERSION = '3.17'
     RPC_DEFAULT_VERSION = '3.0'
     TOPIC = constants.VOLUME_TOPIC
     BINARY = constants.VOLUME_BINARY
@@ -360,8 +361,13 @@ class VolumeAPI(rpc.RPCAPI):
         return cctxt.call(ctxt, 'get_capabilities', discover=discover)
 
     def get_backup_device(self, ctxt, backup, volume):
-        cctxt = self._get_cctxt(volume.service_topic_queue, ('3.2', '3.0'))
-        if cctxt.can_send_version('3.2'):
+        cctxt = self._get_cctxt(volume.service_topic_queue, ('3.17', '3.2',
+                                                             '3.0'))
+        if cctxt.can_send_version('3.17'):
+            cctxt.cast(ctxt, 'get_backup_device', backup=backup,
+                       want_objects=True, async_call=True)
+            backup_obj = None
+        elif cctxt.can_send_version('3.2'):
             backup_obj = cctxt.call(ctxt, 'get_backup_device', backup=backup,
                                     want_objects=True)
         else:
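For context on the fallback above: during a rolling upgrade the volume
service may not understand RPC 3.17 yet, so the client only casts when the
server advertises 3.17 and otherwise keeps the old blocking call. Here is
a runnable sketch of that decision; FakeCctxt is a hypothetical stand-in
for the prepared RPC client context, not the oslo.messaging API.

class FakeCctxt:
    def __init__(self, max_version):
        self.max = tuple(int(p) for p in max_version.split('.'))

    def can_send_version(self, version):
        # Numeric compare so that '3.17' > '3.2'.
        return tuple(int(p) for p in version.split('.')) <= self.max

    def cast(self, method, **kwargs):
        print('cast', method, kwargs)  # one-way; no return value

    def call(self, method, **kwargs):
        print('call', method, kwargs)  # blocks until the server replies
        return {'backup_device': 'fake-dev'}

def get_backup_device(cctxt, backup):
    if cctxt.can_send_version('3.17'):
        # New volume service: fire-and-forget; the device arrives later
        # through the backup manager's continue_backup() callback.
        cctxt.cast('get_backup_device', backup=backup,
                   want_objects=True, async_call=True)
        return None
    elif cctxt.can_send_version('3.2'):
        # Older service: keep the original synchronous behavior.
        return cctxt.call('get_backup_device', backup=backup,
                          want_objects=True)
    else:
        return cctxt.call('get_backup_device', backup=backup)

get_backup_device(FakeCctxt('3.17'), 'backup-1')  # casts, returns None
get_backup_device(FakeCctxt('3.16'), 'backup-1')  # falls back to call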