Fix resizing of volumes attached to active Nova servers

Currently, Trove fails to resize volumes attached to active Nova
servers: it skips crucial steps such as unmounting and mounting the
volume and resizing the filesystem. When extending a volume attached
to an active Nova server, the guest must be stopped and the volume
unmounted, detached, and extended. Once the underlying volume has been
extended, the volume should be reattached, its filesystem resized, and
the volume mounted, and then the guest should be restarted. If all
operations complete successfully, the task status should be set to
NONE and a usage event sent, reflecting the size of the volume as
reported by Cinder.
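
For illustration only, a minimal sketch of that sequence for an active
server (variable names such as guest, volume_client, and instance are
illustrative; the calls mirror the ResizeVolumeAction added below, with
error recovery, polling, and logging omitted):

    guest.stop_db()
    guest.unmount_volume(device_path=CONF.device_path,
                         mount_point=CONF.mount_point)
    volume_client.volumes.detach(volume_id)    # wait until 'available'
    volume_client.volumes.extend(volume_id, new_size)
    # poll Cinder until it reports the new size, then record it in the DB
    volume_client.volumes.attach(volume_id, server_id, CONF.device_path)
    guest.resize_fs(device_path=CONF.device_path,
                    mount_point=CONF.mount_point)
    guest.mount_volume(device_path=CONF.device_path,
                       mount_point=CONF.mount_point)
    instance.restart()
    instance.update_db(task_status=InstanceTasks.NONE)
    # finally, send a 'modify_volume' usage event with the size Cinder reports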

This change fixes extending volumes attached to active Nova servers so
that a proper resize is performed, the task status is set to NONE, and
a usage event is sent after all resize operations complete successfully.
Unit tests are also added.

Change-Id: I9a5785f539a2ee138ce1ded3cbd795ef91d66400
Closes-Bug: #1259976
Author: Paul Marshall 2013-12-11 09:56:00 -06:00
parent b4100f7f30
commit 522f6df6ad
9 changed files with 410 additions and 118 deletions


@@ -314,3 +314,18 @@ def correct_id_with_req(id, request):
def generate_random_password(password_length=CONF.default_password_length):
return passlib_utils.generate_password(size=password_length)
def try_recover(func):
def _decorator(*args, **kwargs):
recover_func = kwargs.pop("recover_func", None)
try:
func(*args, **kwargs)
except Exception:
if recover_func is not None:
recover_func(func)
else:
LOG.debug(_("No recovery method defined for %(func)s") % {
'func': func.__name__})
raise
return _decorator
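
As a usage note, purely for illustration: a method decorated with the
new try_recover decorator accepts an optional recover_func keyword
argument; if the wrapped call raises, the decorator invokes
recover_func(func) (or logs that no recovery method is defined) and
then re-raises. The ResizeVolumeAction added later in this change uses
it as follows:

    @try_recover
    def _unmount_volume(self):
        ...

    # the caller picks the recovery strategy:
    self._unmount_volume(recover_func=self._recover_restart)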


@@ -280,3 +280,24 @@ class API(proxy.RpcProxy):
"for Instance %(instance_id)s") %
{'backup_id': backup_info['id'], 'instance_id': self.id})
self._cast("create_backup", backup_info=backup_info)
def mount_volume(self, device_path=None, mount_point=None):
"""Mount the volume"""
LOG.debug(_("Mount volume %(mount)s on instance %(id)s") % {
'mount': mount_point, 'id': self.id})
self._call("mount_volume", AGENT_LOW_TIMEOUT,
device_path=device_path, mount_point=mount_point)
def unmount_volume(self, device_path=None, mount_point=None):
"""Unmount the volume"""
LOG.debug(_("Unmount volume %(device)s on instance %(id)s") % {
'device': device_path, 'id': self.id})
self._call("unmount_volume", AGENT_LOW_TIMEOUT,
device_path=device_path, mount_point=mount_point)
def resize_fs(self, device_path=None, mount_point=None):
"""Resize the filesystem"""
LOG.debug(_("Resize device %(device)s on instance %(id)s") % {
'device': device_path, 'id': self.id})
self._call("resize_fs", AGENT_LOW_TIMEOUT, device_path=device_path,
mount_point=mount_point)


@@ -175,3 +175,18 @@ class Manager(periodic_task.PeriodicTasks):
backup task, location, type, and other data.
"""
backup.backup(context, backup_info)
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug(_("Mounted the volume."))
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug(_("Unmounted the volume."))
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug(_("Resized the filesystem"))


@@ -37,13 +37,13 @@ class VolumeDevice(object):
def migrate_data(self, mysql_base):
"""Synchronize the data from the mysql directory to the new volume """
self._tmp_mount(TMP_MOUNT_POINT)
self.mount(TMP_MOUNT_POINT, write_to_fstab=False)
if not mysql_base[-1] == '/':
mysql_base = "%s/" % mysql_base
utils.execute("sudo", "rsync", "--safe-links", "--perms",
"--recursive", "--owner", "--group", "--xattrs",
"--sparse", mysql_base, TMP_MOUNT_POINT)
self.unmount()
self.unmount(TMP_MOUNT_POINT)
def _check_device_exists(self):
"""Check that the device path exists.
@@ -91,31 +91,29 @@ class VolumeDevice(object):
self._format()
self._check_format()
def mount(self, mount_point):
def mount(self, mount_point, write_to_fstab=True):
"""Mounts, and writes to fstab."""
mount_point = VolumeMountPoint(self.device_path, mount_point)
mount_point.mount()
mount_point.write_to_fstab()
if write_to_fstab:
mount_point.write_to_fstab()
#TODO(tim.simpson): Are we using this?
def resize_fs(self):
def resize_fs(self, mount_point):
"""Resize the filesystem on the specified device"""
self._check_device_exists()
try:
# check if the device is mounted at mount_point before e2fsck
if not os.path.ismount(mount_point):
utils.execute("sudo", "e2fsck", "-f", "-n", self.device_path)
utils.execute("sudo", "resize2fs", self.device_path)
except ProcessExecutionError as err:
LOG.error(err)
raise GuestError("Error resizing the filesystem: %s" %
self.device_path)
def _tmp_mount(self, mount_point):
"""Mounts, but doesn't save to fstab."""
mount_point = VolumeMountPoint(self.device_path, mount_point)
mount_point.mount() # Don't save to fstab.
def unmount(self):
if os.path.exists(self.device_path):
cmd = "sudo umount %s" % self.device_path
def unmount(self, mount_point):
if os.path.exists(mount_point):
cmd = "sudo umount %s" % mount_point
child = pexpect.spawn(cmd)
child.expect(pexpect.EOF)
@@ -131,7 +129,7 @@ class VolumeMountPoint(object):
def mount(self):
if not os.path.exists(self.mount_point):
utils.execute("sudo", "mkdir", "-p", self.mount_point)
LOG.debug("Adding volume. Device path:%s, mount_point:%s, "
LOG.debug("Mounting volume. Device path:%s, mount_point:%s, "
"volume_type:%s, mount options:%s" %
(self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))


@@ -23,6 +23,7 @@ from trove.backup import models as bkup_models
from trove.common import cfg
from trove.common import template
from trove.common import utils
from trove.common.utils import try_recover
from trove.common.exception import GuestError
from trove.common.exception import GuestTimeout
from trove.common.exception import PollTimeOut
@@ -695,95 +696,11 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
server=old_server)
LOG.debug(_("end _delete_resources for id: %s") % self.id)
def _resize_active_volume(self, new_size):
try:
LOG.debug(_("Instance %s calling stop_db...") % self.server.id)
self.guest.stop_db()
LOG.debug(_("Detach volume %(vol_id)s from instance %(id)s") %
{'vol_id': self.volume_id, 'id': self.server.id})
self.volume_client.volumes.detach(self.volume_id)
utils.poll_until(
lambda: self.volume_client.volumes.get(self.volume_id),
lambda volume: volume.status == 'available',
sleep_time=2,
time_out=CONF.volume_time_out)
LOG.debug(_("Successfully detach volume %s") % self.volume_id)
except Exception as e:
LOG.debug(_("end _resize_active_volume for id: %s") %
self.server.id)
LOG.exception(_("Failed to detach volume %(volume_id)s "
"instance %(id)s: %(e)s") %
{'volume_id': self.volume_id, 'id':
self.server.id, 'e': str(e)})
self.restart()
raise
self._do_resize(new_size)
self.volume_client.volumes.attach(self.server.id, self.volume_id)
LOG.debug(_("end _resize_active_volume for id: %s") % self.server.id)
self.restart()
def _do_resize(self, new_size):
try:
self.volume_client.volumes.extend(self.volume_id, new_size)
except cinder_exceptions.ClientException:
LOG.exception(_("Error encountered trying to rescan or resize the "
"attached volume filesystem for volume: "
"%s") % self.volume_id)
raise
try:
volume = self.volume_client.volumes.get(self.volume_id)
if not volume:
raise (cinder_exceptions.
ClientException(_('Failed to get volume with '
'id: %(id)s') %
{'id': self.volume_id}))
utils.poll_until(
lambda: self.volume_client.volumes.get(self.volume_id),
lambda volume: volume.size == int(new_size),
sleep_time=2,
time_out=CONF.volume_time_out)
self.update_db(volume_size=new_size)
except PollTimeOut:
LOG.error(_("Timeout trying to rescan or resize the attached "
"volume filesystem for volume %(vol_id)s of "
"instance: %(id)s") %
{'vol_id': self.volume_id, 'id': self.id})
except Exception as e:
LOG.exception(_("Error encountered trying to rescan or resize the "
"attached volume filesystem of volume %(vol_id)s of "
"instance %(id)s: %(e)s") %
{'vol_id': self.volume_id, 'id': self.id, 'e': e})
finally:
self.update_db(task_status=inst_models.InstanceTasks.NONE)
def resize_volume(self, new_size):
LOG.debug(_("begin resize_volume for id: %s") % self.id)
old_volume_size = self.volume_size
new_size = int(new_size)
LOG.debug(_("%(gt)s: Resizing instance %(instance_id)s volume for "
"server %(server_id)s from %(old_volume_size)s to "
"%(new_size)r GB")
% {'gt': greenthread.getcurrent(),
'instance_id': self.id,
'server_id': self.server.id,
'old_volume_size': old_volume_size,
'new_size': new_size})
if self.server.status == 'active':
self._resize_active_volume(new_size)
else:
self._do_resize(new_size)
self.send_usage_event('modify_volume', old_volume_size=old_volume_size,
launched_at=timeutils.isotime(self.updated),
modify_at=timeutils.isotime(self.updated),
volume_size=new_size)
LOG.debug(_("end resize_volume for id: %s") % self.id)
LOG.debug(_("begin resize_volume for instance: %s") % self.id)
action = ResizeVolumeAction(self, self.volume_size, new_size)
action.execute()
LOG.debug(_("end resize_volume for instance: %s") % self.id)
def resize_flavor(self, old_flavor, new_flavor):
action = ResizeAction(self, old_flavor, new_flavor)
@@ -918,6 +835,222 @@ class BackupTasks(object):
backup.delete()
class ResizeVolumeAction(ConfigurationMixin):
"""Performs volume resize action."""
def __init__(self, instance, old_size, new_size):
self.instance = instance
self.old_size = int(old_size)
self.new_size = int(new_size)
def _fail(self, orig_func):
LOG.exception(_("%(func)s encountered an error when attempting to "
"resize the volume for instance %(id)s. Setting service "
"status to failed.") % {'func': orig_func.__name__,
'id': self.instance.id})
service = InstanceServiceStatus.find_by(instance_id=self.instance.id)
service.set_status(ServiceStatuses.FAILED)
service.save()
def _recover_restart(self, orig_func):
LOG.exception(_("%(func)s encountered an error when attempting to "
"resize the volume for instance %(id)s. Trying to "
"recover by restarting the guest.") % {
'func': orig_func.__name__,
'id': self.instance.id})
self.instance.restart()
def _recover_mount_restart(self, orig_func):
LOG.exception(_("%(func)s encountered an error when attempting to "
"resize the volume for instance %(id)s. Trying to "
"recover by mounting the volume and then restarting the "
"guest.") % {'func': orig_func.__name__,
'id': self.instance.id})
self._mount_volume()
self.instance.restart()
def _recover_full(self, orig_func):
LOG.exception(_("%(func)s encountered an error when attempting to "
"resize the volume for instance %(id)s. Trying to "
"recover by attaching and mounting the volume and then "
"restarting the guest.") % {'func': orig_func.__name__,
'id': self.instance.id})
self._attach_volume()
self._mount_volume()
self.instance.restart()
def _stop_db(self):
LOG.debug(_("Instance %s calling stop_db.") % self.instance.id)
self.instance.guest.stop_db()
@try_recover
def _unmount_volume(self):
LOG.debug(_("Unmounting the volume on instance %(id)s") % {
'id': self.instance.id})
self.instance.guest.unmount_volume(device_path=CONF.device_path,
mount_point=CONF.mount_point)
LOG.debug(_("Successfully unmounted the volume %(vol_id)s for "
"instance %(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _detach_volume(self):
LOG.debug(_("Detach volume %(vol_id)s from instance %(id)s") % {
'vol_id': self.instance.volume_id,
'id': self.instance.id})
self.instance.volume_client.volumes.detach(self.instance.volume_id)
def volume_available():
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
return volume.status == 'available'
utils.poll_until(volume_available,
sleep_time=2,
time_out=CONF.volume_time_out)
LOG.debug(_("Successfully detached volume %(vol_id)s from instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _attach_volume(self):
LOG.debug(_("Attach volume %(vol_id)s to instance %(id)s at "
"%(dev)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id, 'dev': CONF.device_path})
self.instance.volume_client.volumes.attach(self.instance.volume_id,
self.instance.server.id,
CONF.device_path)
def volume_in_use():
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
return volume.status == 'in-use'
utils.poll_until(volume_in_use,
sleep_time=2,
time_out=CONF.volume_time_out)
LOG.debug(_("Successfully attached volume %(vol_id)s to instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _resize_fs(self):
LOG.debug(_("Resizing the filesystem for instance %(id)s") % {
'id': self.instance.id})
self.instance.guest.resize_fs(device_path=CONF.device_path,
mount_point=CONF.mount_point)
LOG.debug(_("Successfully resized volume %(vol_id)s filesystem for "
"instance %(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _mount_volume(self):
LOG.debug(_("Mount the volume on instance %(id)s") % {
'id': self.instance.id})
self.instance.guest.mount_volume(device_path=CONF.device_path,
mount_point=CONF.mount_point)
LOG.debug(_("Successfully mounted the volume %(vol_id)s on instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _extend(self):
LOG.debug(_("Extending volume %(vol_id)s for instance %(id)s to "
"size %(size)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id, 'size': self.new_size})
self.instance.volume_client.volumes.extend(self.instance.volume_id,
self.new_size)
LOG.debug(_("Successfully extended the volume %(vol_id)s for instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
def _verify_extend(self):
try:
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
if not volume:
msg = (_('Failed to get volume %(vol_id)s') % {
'vol_id': self.instance.volume_id})
raise cinder_exceptions.ClientException(msg)
def volume_is_new_size():
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
return volume.size == self.new_size
utils.poll_until(volume_is_new_size,
sleep_time=2,
time_out=CONF.volume_time_out)
self.instance.update_db(volume_size=self.new_size)
except PollTimeOut:
LOG.exception(_("Timeout trying to extend the volume %(vol_id)s "
"for instance %(id)s") % {
'vol_id': self.instance.volume_id,
'id': self.instance.id})
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
if volume.status == 'extending':
self._fail(self._verify_extend)
elif volume.size != self.new_size:
self.instance.update_db(volume_size=volume.size)
self._recover_full(self._verify_extend)
raise
except Exception:
LOG.exception(_("Error encountered trying to verify extend for "
"the volume %(vol_id)s for instance %(id)s") % {
'vol_id': self.instance.volume_id,
'id': self.instance.id})
self._recover_full(self._verify_extend)
raise
def _resize_active_volume(self):
LOG.debug(_("begin _resize_active_volume for id: %(id)s") % {
'id': self.instance.id})
self._stop_db()
self._unmount_volume(recover_func=self._recover_restart)
self._detach_volume(recover_func=self._recover_mount_restart)
self._extend(recover_func=self._recover_full)
self._verify_extend()
# if anything fails after this point, recovery is futile
self._attach_volume(recover_func=self._fail)
self._resize_fs(recover_func=self._fail)
self._mount_volume(recover_func=self._fail)
self.instance.restart()
LOG.debug(_("end _resize_active_volume for id: %(id)s") % {
'id': self.instance.id})
def execute(self):
LOG.debug(_("%(gt)s: Resizing instance %(id)s volume for server "
"%(server_id)s from %(old_volume_size)s to "
"%(new_size)r GB") % {'gt': greenthread.getcurrent(),
'id': self.instance.id,
'server_id': self.instance.server.id,
'old_volume_size': self.old_size,
'new_size': self.new_size})
if self.instance.server.status == InstanceStatus.ACTIVE:
self._resize_active_volume()
self.instance.update_db(task_status=inst_models.InstanceTasks.NONE)
# send usage event for size reported by cinder
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
launched_time = timeutils.isotime(self.instance.updated)
modified_time = timeutils.isotime(self.instance.updated)
self.instance.send_usage_event('modify_volume',
old_volume_size=self.old_size,
launched_at=launched_time,
modify_at=modified_time,
volume_size=volume.size)
else:
self.instance.update_db(task_status=inst_models.InstanceTasks.NONE)
msg = _("Volume resize failed for instance %(id)s. The instance "
"must be in state %(state)s not %(inst_state)s.") % {
'id': self.instance.id,
'state': InstanceStatus.ACTIVE,
'inst_state': self.instance.server.status}
raise TroveError(msg)
class ResizeActionBase(ConfigurationMixin):
"""Base class for executing a resize action."""


@@ -308,6 +308,15 @@ class FakeGuest(object):
backup.save()
eventlet.spawn_after(1.0, finish_create_backup)
def mount_volume(self, device_path=None, mount_point=None):
pass
def unmount_volume(self, device_path=None, mount_point=None):
pass
def resize_fs(self, device_path=None, mount_point=None):
pass
def get_or_create(id):
if id not in DB:


@@ -113,6 +113,7 @@ class FakeServer(object):
for volume in self.volumes:
info_vols.append({'id': volume.id})
volume.set_attachment(id)
volume.schedule_status("in-use", 1)
self.host = FAKE_HOSTS[0]
self.old_host = None
setattr(self, 'OS-EXT-AZ:availability_zone', 'nova')
@@ -512,6 +513,16 @@ class FakeVolumes(object):
volume._current_status = "available"
eventlet.spawn_after(1.0, finish_detach)
def attach(self, volume_id, server_id, device_path):
volume = self.get(volume_id)
if volume._current_status != "available":
raise Exception("Invalid volume status")
def finish_attach():
volume._current_status = "in-use"
eventlet.spawn_after(1.0, finish_attach)
class FakeAccount(object):


@@ -38,18 +38,19 @@ class VolumeDeviceTest(testtools.TestCase):
def test_migrate_data(self):
origin_execute = utils.execute
utils.execute = Mock()
origin_os_path_exists = os.path.exists
os.path.exists = Mock()
fake_spawn = _setUp_fake_spawn()
origin_tmp_mount = self.volumeDevice._tmp_mount
origin_unmount = self.volumeDevice.unmount
self.volumeDevice._tmp_mount = MagicMock()
self.volumeDevice.unmount = MagicMock()
self.volumeDevice.migrate_data('/')
self.assertEqual(1, fake_spawn.expect.call_count)
self.assertEqual(1, utils.execute.call_count)
self.assertEqual(1, self.volumeDevice._tmp_mount.call_count)
self.assertEqual(1, self.volumeDevice.unmount.call_count)
utils.execute = origin_execute
self.volumeDevice._tmp_mount = origin_tmp_mount
self.volumeDevice.unmount = origin_unmount
os.path.exists = origin_os_path_exists
def test__check_device_exists(self):
origin_execute = utils.execute
@@ -98,6 +99,8 @@ class VolumeDeviceTest(testtools.TestCase):
def test_mount(self):
origin_ = volume.VolumeMountPoint.mount
volume.VolumeMountPoint.mount = Mock()
origin_os_path_exists = os.path.exists
os.path.exists = Mock()
origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab
volume.VolumeMountPoint.write_to_fstab = Mock()
@@ -106,28 +109,24 @@ class VolumeDeviceTest(testtools.TestCase):
self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count)
volume.VolumeMountPoint.mount = origin_
volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab
os.path.exists = origin_os_path_exists
def test_resize_fs(self):
origin_check_device_exists = self.volumeDevice._check_device_exists
origin_execute = utils.execute
utils.execute = Mock()
self.volumeDevice._check_device_exists = MagicMock()
origin_os_path_exists = os.path.exists
os.path.exists = Mock()
self.volumeDevice.resize_fs()
self.volumeDevice.resize_fs('/mnt/volume')
self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
self.assertEqual(1, utils.execute.call_count)
self.assertEqual(2, utils.execute.call_count)
self.volumeDevice._check_device_exists = origin_check_device_exists
os.path.exists = origin_os_path_exists
utils.execute = origin_execute
def test__tmp_mount(self):
origin_ = volume.VolumeMountPoint.mount
volume.VolumeMountPoint.mount = Mock()
self.volumeDevice._tmp_mount(Mock)
self.assertEqual(1, volume.VolumeMountPoint.mount.call_count)
volume.VolumeMountPoint.mount = origin_
def test_unmount_positive(self):
self._test_unmount()
@@ -139,7 +138,7 @@ class VolumeDeviceTest(testtools.TestCase):
os.path.exists = MagicMock(return_value=positive)
fake_spawn = _setUp_fake_spawn()
self.volumeDevice.unmount()
self.volumeDevice.unmount('/mnt/volume')
COUNT = 1
if not positive:
COUNT = 0


@@ -15,19 +15,24 @@ import testtools
from mock import Mock
from testtools.matchers import Equals
from mockito import mock, when, unstub, any, verify, never
from cinderclient import exceptions as cinder_exceptions
from trove.datastore import models as datastore_models
from trove.taskmanager import models as taskmanager_models
from trove.backup import models as backup_models
from trove.common import remote
from trove.common.exception import GuestError
from trove.common.exception import PollTimeOut
from trove.common.exception import TroveError
from trove.common.instance import ServiceStatuses
from trove.extensions.mysql import models as mysql_models
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import InstanceStatus
from trove.instance.models import DBInstance
from trove.instance.tasks import InstanceTasks
from trove.tests.unittests.util import util
from trove.common import utils
from trove.openstack.common import timeutils
from swiftclient.client import ClientException
from tempfile import NamedTemporaryFile
import os
@@ -224,6 +229,92 @@ class FreshInstanceTasksTest(testtools.TestCase):
InstanceTasks.BUILDING_ERROR_TIMEOUT_GA)
class ResizeVolumeTest(testtools.TestCase):
def setUp(self):
super(ResizeVolumeTest, self).setUp()
utils.poll_until = Mock()
timeutils.isotime = Mock()
self.instance = Mock()
self.old_vol_size = 1
self.new_vol_size = 2
self.action = taskmanager_models.ResizeVolumeAction(self.instance,
self.old_vol_size,
self.new_vol_size)
def tearDown(self):
super(ResizeVolumeTest, self).tearDown()
def test_resize_volume_unmount_exception(self):
self.instance.guest.unmount_volume = Mock(
side_effect=GuestError("test exception"))
self.assertRaises(GuestError,
self.action._unmount_volume,
recover_func=self.action._recover_restart)
self.assertEqual(1, self.instance.restart.call_count)
self.instance.guest.unmount_volume.side_effect = None
self.instance.reset_mock()
def test_resize_volume_detach_exception(self):
self.instance.volume_client.volumes.detach = Mock(
side_effect=cinder_exceptions.ClientException("test exception"))
self.assertRaises(cinder_exceptions.ClientException,
self.action._detach_volume,
recover_func=self.action._recover_mount_restart)
self.assertEqual(1, self.instance.guest.mount_volume.call_count)
self.assertEqual(1, self.instance.restart.call_count)
self.instance.volume_client.volumes.detach.side_effect = None
self.instance.reset_mock()
def test_resize_volume_extend_exception(self):
self.instance.volume_client.volumes.extend = Mock(
side_effect=cinder_exceptions.ClientException("test exception"))
self.assertRaises(cinder_exceptions.ClientException,
self.action._extend,
recover_func=self.action._recover_full)
attach_count = self.instance.volume_client.volumes.attach.call_count
self.assertEqual(1, attach_count)
self.assertEqual(1, self.instance.guest.mount_volume.call_count)
self.assertEqual(1, self.instance.restart.call_count)
self.instance.volume_client.volumes.extend.side_effect = None
self.instance.reset_mock()
def test_resize_volume_verify_extend_no_volume(self):
self.instance.volume_client.volumes.get = Mock(return_value=None)
self.assertRaises(cinder_exceptions.ClientException,
self.action._verify_extend)
self.instance.reset_mock()
def test_resize_volume_poll_timeout(self):
utils.poll_until = Mock(side_effect=PollTimeOut)
self.assertRaises(PollTimeOut, self.action._verify_extend)
self.assertEqual(2, self.instance.volume_client.volumes.get.call_count)
utils.poll_until.side_effect = None
self.instance.reset_mock()
def test_resize_volume_active_server_succeeds(self):
server = Mock(status=InstanceStatus.ACTIVE)
self.instance.attach_mock(server, 'server')
self.action.execute()
self.assertEqual(1, self.instance.guest.stop_db.call_count)
self.assertEqual(1, self.instance.guest.unmount_volume.call_count)
detach_count = self.instance.volume_client.volumes.detach.call_count
self.assertEqual(1, detach_count)
extend_count = self.instance.volume_client.volumes.extend.call_count
self.assertEqual(1, extend_count)
attach_count = self.instance.volume_client.volumes.attach.call_count
self.assertEqual(1, attach_count)
self.assertEqual(1, self.instance.guest.resize_fs.call_count)
self.assertEqual(1, self.instance.guest.mount_volume.call_count)
self.assertEqual(1, self.instance.restart.call_count)
self.instance.reset_mock()
def test_resize_volume_server_error_fails(self):
server = Mock(status=InstanceStatus.ERROR)
self.instance.attach_mock(server, 'server')
self.assertRaises(TroveError, self.action.execute)
self.instance.reset_mock()
class BackupTasksTest(testtools.TestCase):
def setUp(self):
super(BackupTasksTest, self).setUp()