diff --git a/.zuul.yaml b/.zuul.yaml
index beb1bd21e10..b974bae9d30 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -132,6 +132,11 @@
     vars:
       zuul_additional_subunit_dirs:
         - "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            volume-feature-enabled:
+              volume_revert: True

 - job:
     name: cinder-grenade-dsvm-mn-sub-bak
diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py
index 543588bb0e8..59c6d37742d 100644
--- a/cinder/tests/unit/volume/drivers/test_rbd.py
+++ b/cinder/tests/unit/volume/drivers/test_rbd.py
@@ -960,6 +960,17 @@ class RBDTestCase(test.TestCase):
         self.assertTrue(proxy.unprotect_snap.called)
         self.assertFalse(proxy.remove_snap.called)

+    @common_mocks
+    def test_snapshot_revert_use_temp_snapshot(self):
+        self.assertFalse(self.driver.snapshot_revert_use_temp_snapshot())
+
+    @common_mocks
+    def test_revert_to_snapshot(self):
+        image = self.mock_proxy.return_value.__enter__.return_value
+        self.driver.revert_to_snapshot(self.context, self.volume_a,
+                                       self.snapshot)
+        image.rollback_to_snap.assert_called_once_with(self.snapshot.name)
+
     @common_mocks
     def test_get_children_info(self):
         volume = self.mock_proxy
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index e57ad59bac9..a463ebb0a48 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -1232,6 +1232,32 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
                 LOG.info("Snapshot %s does not exist in backend.", snap_name)

+    def snapshot_revert_use_temp_snapshot(self):
+        """Disable the use of a temporary snapshot on revert."""
+        return False
+
+    def revert_to_snapshot(self, context, volume, snapshot):
+        """Revert a volume to a given snapshot."""
+        # NOTE(rosmaita): The Ceph documentation notes that this operation is
+        # inefficient on the backend for large volumes, and that the preferred
+        # method of returning to a pre-existing state in Ceph is to clone from
+        # a snapshot.
+        # So why don't we do something like that here?
+        # (a) an end user can do the more efficient operation on their own if
+        #     they value speed over the convenience of reverting their existing
+        #     volume
+        # (b) revert-to-snapshot is properly a backend operation, and should
+        #     be handled by the backend -- trying to "fake it" in this driver
+        #     is both dishonest and likely to cause subtle bugs
+        # (c) the Ceph project undergoes continual improvement.  It may be
+        #     the case that there are things an operator can do on the Ceph
+        #     side (for example, use BlueStore for the Ceph backend storage)
+        #     to improve the efficiency of this operation.
+        # Thus, a motivated operator reading this is encouraged to consult
+        # the Ceph documentation.
+        with RBDVolumeProxy(self, volume.name) as image:
+            image.rollback_to_snap(snapshot.name)
+
     def _disable_replication(self, volume):
         """Disable replication on the given volume."""
         vol_name = utils.convert_str(volume.name)
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index f9e5b0ba3b2..c74a73e1481 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -812,7 +812,7 @@ driver.prophetstor=missing
 driver.pure=missing
 driver.qnap=missing
 driver.quobyte=missing
-driver.rbd=missing
+driver.rbd=complete
 driver.seagate=missing
 driver.storpool=missing
 driver.synology=missing
diff --git a/releasenotes/notes/rbd-support-revert-to-snapshot-c9ca62c9efbabf5f.yaml b/releasenotes/notes/rbd-support-revert-to-snapshot-c9ca62c9efbabf5f.yaml
new file mode 100644
index 00000000000..ba005c546e4
--- /dev/null
+++ b/releasenotes/notes/rbd-support-revert-to-snapshot-c9ca62c9efbabf5f.yaml
@@ -0,0 +1,36 @@
+---
+features:
+  - |
+    RBD driver: support added for reverting a volume to the most recent
+    snapshot taken.
+
+    Please be aware of the following known issues with this operation
+    and the Ceph storage backend:
+
+    * Rolling back a volume to a snapshot overwrites the current volume
+      with the data from the snapshot, and the time it takes to complete
+      this operation increases with the size of the volume.
+
+      It is faster to create a new volume from a snapshot.  You may
+      wish to recommend this option to your users whose use cases do not
+      strictly require revert-to-snapshot.
+
+    * The efficiency of revert-to-snapshot is also dependent upon the
+      Ceph storage backend in use, namely, whether or not BlueStore is
+      being used in your Ceph installation.
+
+    Please consult the Ceph documentation for details.
+issues:
+  - |
+    RBD driver: There are some known issues concerning the revert-to-snapshot
+    support added in this release.
+
+    * The time it takes to complete the revert-to-snapshot operation increases
+      with the size of the volume.  It is faster to create a new volume from
+      a snapshot.
+
+    * The efficiency of revert-to-snapshot depends upon the Ceph storage
+      backend in use, particularly whether or not BlueStore is being used
+      in your Ceph installation.
+
+    Please consult the Ceph documentation for details.
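For reference, a minimal sketch (not part of this patch) of how an end user might exercise the new revert-to-snapshot support once it is deployed. It assumes python-cinderclient with Block Storage API microversion 3.40 (where revert-to-snapshot was introduced), keystoneauth password credentials, and placeholder endpoint and resource IDs.

```python
# Illustrative sketch only -- not part of this change. Assumes API
# microversion 3.40 and keystoneauth credentials; the auth URL, project
# details, and resource IDs below are placeholders.
from cinderclient import client
from keystoneauth1 import loading
from keystoneauth1 import session

loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(auth_url='http://controller:5000/v3',
                                username='demo',
                                password='secret',
                                project_name='demo',
                                user_domain_name='Default',
                                project_domain_name='Default')
sess = session.Session(auth=auth)
cinder = client.Client('3.40', session=sess)

# Cinder only allows reverting a volume to its most recent snapshot; with
# this change the RBD driver performs the revert as a native Ceph rollback.
volume = cinder.volumes.get('VOLUME_ID')            # placeholder ID
snapshot = cinder.volume_snapshots.get('SNAPSHOT_ID')  # most recent snapshot
cinder.volumes.revert_to_snapshot(volume, snapshot)
```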