Merge "Support Glance image data colocation"
This commit is contained in:
commit
f74c533382
@@ -217,13 +217,17 @@ class GlanceClientWrapper(object):
                       glanceclient.exc.CommunicationError)
         num_attempts = 1 + CONF.glance_num_retries
         store_id = kwargs.pop('store_id', None)
+        base_image_ref = kwargs.pop('base_image_ref', None)
 
         for attempt in range(1, num_attempts + 1):
             client = self.client or self._create_onetime_client(context)
-            if store_id:
-                client.http_client.additional_headers = {
-                    'x-image-meta-store': store_id
-                }
+
+            keys = ('x-image-meta-store', 'x-openstack-base-image-ref',)
+            values = (store_id, base_image_ref,)
+
+            headers = {k: v for (k, v) in zip(keys, values) if v is not None}
+            if headers:
+                client.http_client.additional_headers = headers
 
             try:
                 controller = getattr(client,
@@ -395,7 +399,7 @@ class GlanceImageService(object):
 
     def update(self, context, image_id,
                image_meta, data=None, purge_props=True,
-               store_id=None):
+               store_id=None, base_image_ref=None):
         """Modify the given image with the new data."""
         # For v2, _translate_to_glance stores custom properties in image meta
         # directly. We need the custom properties to identify properties to
@@ -412,6 +416,8 @@ class GlanceImageService(object):
         kwargs = {}
         if store_id:
             kwargs['store_id'] = store_id
+        if base_image_ref:
+            kwargs['base_image_ref'] = base_image_ref
 
         try:
             if data:
@@ -681,7 +681,7 @@ def _validate_file_format(image_data, expected_format):
 
 def upload_volume(context, image_service, image_meta, volume_path,
                   volume_format='raw', run_as_root=True, compress=True,
-                  store_id=None):
+                  store_id=None, base_image_ref=None):
     image_id = image_meta['id']
     if image_meta.get('container_format') != 'compressed':
         if (image_meta['disk_format'] == volume_format):
@@ -691,13 +691,15 @@ def upload_volume(context, image_service, image_meta, volume_path,
             with open(volume_path, 'rb') as image_file:
                 image_service.update(context, image_id, {},
                                      tpool.Proxy(image_file),
-                                     store_id=store_id)
+                                     store_id=store_id,
+                                     base_image_ref=base_image_ref)
         else:
             with utils.temporary_chown(volume_path):
                 with open(volume_path, 'rb') as image_file:
                     image_service.update(context, image_id, {},
                                          tpool.Proxy(image_file),
-                                         store_id=store_id)
+                                         store_id=store_id,
+                                         base_image_ref=base_image_ref)
             return
 
     with temporary_file() as tmp:
@@ -740,7 +742,8 @@ def upload_volume(context, image_service, image_meta, volume_path,
         with open(tmp, 'rb') as image_file:
             image_service.update(context, image_id, {},
                                  tpool.Proxy(image_file),
-                                 store_id=store_id)
+                                 store_id=store_id,
+                                 base_image_ref=base_image_ref)
 
 
 def check_virtual_size(virtual_size, volume_size, image_id):
@@ -212,7 +212,7 @@ class _FakeImageService(object):
         return self.images[image_id]
 
     def update(self, context, image_id, metadata, data=None,
-               purge_props=False, store_id=None):
+               purge_props=False, store_id=None, base_image_ref=None):
         """Replace the contents of the given image with the new data.
 
         :raises ImageNotFound: if the image does not exist.
@@ -537,6 +537,36 @@ class TestGlanceImageService(test.TestCase):
         client.call.assert_called_once_with(
             self.context, 'update', image_id, k1='v1', remove_props=['k2'])
 
+    @mock.patch.object(glance.GlanceImageService, '_translate_from_glance')
+    @mock.patch.object(glance.GlanceImageService, 'show')
+    def test_update_base_image_ref(self, show, translate_from_glance):
+        image_id = mock.sentinel.image_id
+        client = mock.Mock(call=mock.Mock())
+        service = glance.GlanceImageService(client=client)
+        data = '*' * 256
+        show.return_value = {}
+        translate_from_glance.return_value = {}
+
+        service.update(self.context, image_id, {}, data,
+                       base_image_ref=123)
+        calls = [mock.call.call(
+            self.context, 'upload', image_id, data, base_image_ref=123),
+            mock.call.call(self.context, 'get', image_id)]
+        client.assert_has_calls(calls, any_order=True)
+
+    def test_call_with_additional_headers(self):
+        glance_wrapper = glance.GlanceClientWrapper()
+        fake_client = mock.Mock()
+        self.mock_object(glance_wrapper, 'client', fake_client)
+        glance_wrapper.call(self.context, 'upload',
+                            {},
+                            store_id='xyz',
+                            base_image_ref=123)
+        self.assertDictEqual({
+            'x-image-meta-store': 'xyz',
+            'x-openstack-base-image-ref': 123},
+            fake_client.http_client.additional_headers)
+
     def test_delete(self):
         fixture1 = self._make_fixture(name='test image 1')
         fixture2 = self._make_fixture(name='test image 2')
@@ -763,7 +763,7 @@ class TestUploadVolume(test.TestCase):
             mock_open.return_value.__enter__.return_value)
         image_service.update.assert_called_once_with(
             ctxt, image_meta['id'], {}, mock_proxy.return_value,
-            store_id=None)
+            store_id=None, base_image_ref=None)
 
     @mock.patch('eventlet.tpool.Proxy')
     @mock.patch('cinder.image.image_utils.utils.temporary_chown')
@@ -796,7 +796,7 @@ class TestUploadVolume(test.TestCase):
             mock_open.return_value.__enter__.return_value)
         image_service.update.assert_called_once_with(
             ctxt, image_meta['id'], {}, mock_proxy.return_value,
-            store_id=None)
+            store_id=None, base_image_ref=None)
 
     @mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
     @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
@@ -852,7 +852,7 @@ class TestUploadVolume(test.TestCase):
             mock_open.return_value.__enter__.return_value)
         image_service.update.assert_called_once_with(
             ctxt, image_meta['id'], {}, mock_proxy.return_value,
-            store_id=None)
+            store_id=None, base_image_ref=None)
         mock_engine.compress_img.assert_called()
 
     @mock.patch('eventlet.tpool.Proxy')
@@ -886,7 +886,7 @@ class TestUploadVolume(test.TestCase):
             mock_open.return_value.__enter__.return_value)
         image_service.update.assert_called_once_with(
             ctxt, image_meta['id'], {}, mock_proxy.return_value,
-            store_id=None)
+            store_id=None, base_image_ref=None)
 
     @mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
     @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
@@ -943,7 +943,7 @@ class TestUploadVolume(test.TestCase):
             mock_open.return_value.__enter__.return_value)
         image_service.update.assert_called_once_with(
             ctxt, image_meta['id'], {}, mock_proxy.return_value,
-            store_id=None)
+            store_id=None, base_image_ref=None)
         mock_engine.compress_img.assert_called()
 
     @mock.patch('cinder.image.image_utils.CONF')
@@ -978,6 +978,32 @@ class TestUploadVolume(test.TestCase):
         self.assertEqual(2, mock_info.call_count)
         self.assertFalse(image_service.update.called)
 
+    @mock.patch('eventlet.tpool.Proxy')
+    @mock.patch('cinder.image.image_utils.utils.temporary_chown')
+    @mock.patch('cinder.image.image_utils.CONF')
+    @mock.patch('six.moves.builtins.open')
+    @mock.patch('cinder.image.image_utils.qemu_img_info')
+    @mock.patch('cinder.image.image_utils.convert_image')
+    @mock.patch('cinder.image.image_utils.temporary_file')
+    @mock.patch('cinder.image.image_utils.os')
+    def test_base_image_ref(self, mock_os, mock_temp, mock_convert, mock_info,
+                            mock_open, mock_conf, mock_chown, mock_proxy):
+        ctxt = mock.sentinel.context
+        image_service = mock.Mock()
+        image_meta = {'id': 'test_id',
+                      'disk_format': 'raw',
+                      'container_format': mock.sentinel.container_format}
+        volume_path = mock.sentinel.volume_path
+        mock_os.name = 'posix'
+        mock_os.access.return_value = False
+
+        image_utils.upload_volume(ctxt, image_service, image_meta,
+                                  volume_path, base_image_ref='xyz')
+
+        image_service.update.assert_called_once_with(
+            ctxt, image_meta['id'], {}, mock_proxy.return_value,
+            store_id=None, base_image_ref='xyz')
+
 
 class TestFetchToVhd(test.TestCase):
     @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
@@ -1271,7 +1271,8 @@ class QuobyteDriverTestCase(test.TestCase):
         self.assertEqual(self.TEST_MNT_POINT_BASE,
                          conn_info['mount_point_base'])
 
-    def test_copy_volume_to_image_raw_image(self):
+    @mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
+    def test_copy_volume_to_image_raw_image(self, vol_glance_metadata):
         drv = self._driver
 
         volume_type_id = db.volume_type_create(
@@ -1315,10 +1316,12 @@ class QuobyteDriverTestCase(test.TestCase):
             run_as_root=False)
         mock_upload_volume.assert_called_once_with(
             mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
-            store_id=None)
+            store_id=None, base_image_ref=None, compress=True,
+            volume_format='raw')
         self.assertTrue(mock_create_temporary_file.called)
 
-    def test_copy_volume_to_image_qcow2_image(self):
+    @mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
+    def test_copy_volume_to_image_qcow2_image(self, vol_glance_metadata):
         """Upload a qcow2 image file which has to be converted to raw first."""
         drv = self._driver
 
@@ -1367,10 +1370,12 @@ class QuobyteDriverTestCase(test.TestCase):
             volume_path, upload_path, 'raw', run_as_root=False)
         mock_upload_volume.assert_called_once_with(
             mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
-            store_id=None)
+            store_id=None, base_image_ref=None, compress=True,
+            volume_format='raw')
         self.assertTrue(mock_create_temporary_file.called)
 
-    def test_copy_volume_to_image_snapshot_exists(self):
+    @mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
+    def test_copy_volume_to_image_snapshot_exists(self, vol_glance_metadata):
         """Upload an active snapshot which has to be converted to raw first."""
         drv = self._driver
 
@@ -1421,7 +1426,8 @@ class QuobyteDriverTestCase(test.TestCase):
             volume_path, upload_path, 'raw', run_as_root=False)
         mock_upload_volume.assert_called_once_with(
             mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
-            store_id=None)
+            store_id=None, base_image_ref=None, compress=True,
+            volume_format='raw')
         self.assertTrue(mock_create_temporary_file.called)
 
     def test_set_nas_security_options_default(self):
@@ -451,7 +451,8 @@ class VMwareVStorageObjectDriverTestCase(test.TestCase):
             vmdk_file_path=vmdk_file_path,
             vmdk_size=volume.size * units.Gi,
             image_name=image_meta['name'],
-            store_id='fake-store')
+            store_id='fake-store',
+            base_image_ref=None)
         vops.detach_fcd.assert_called_once_with(backing, fcd_loc)
         delete_temp_backing.assert_called_once_with(backing)
 
@@ -1291,6 +1291,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             host=self._config.vmware_host_ip,
             port=self._config.vmware_host_port,
             store_id='fake-store',
+            base_image_ref=None,
             vm=backing,
             vmdk_file_path=vmdk_file_path,
             vmdk_size=volume['size'] * units.Gi,
@@ -415,8 +415,9 @@ class TestWindowsISCSIDriver(test.TestCase):
             expected_tmp_vhd_path)
         mock_upload_volume.assert_called_once_with(
             mock.sentinel.context, mock.sentinel.image_service,
-            fake_image_meta, expected_tmp_vhd_path, 'vhd',
-            store_id='fake-store')
+            fake_image_meta, expected_tmp_vhd_path, volume_format='vhd',
+            store_id='fake-store', base_image_ref=None,
+            compress=True, run_as_root=True)
         mock_delete_if_exists.assert_called_once_with(
             expected_tmp_vhd_path)
 
@@ -788,8 +788,9 @@ class WindowsSmbFsTestCase(test.TestCase):
 
         fake_upload_volume.assert_called_once_with(
             mock.sentinel.context, mock.sentinel.image_service,
-            fake_image_meta, upload_path, fake_img_format,
-            store_id='fake-store')
+            fake_image_meta, upload_path, volume_format=fake_img_format,
+            store_id='fake-store', base_image_ref=None, compress=True,
+            run_as_root=True)
 
     @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
     def test_copy_image_to_volume(self, mock_get_vhd_type):
@@ -36,6 +36,7 @@ from cinder.volume import configuration
 from cinder.volume import driver_utils
 from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import throttling
+from cinder.volume import volume_utils
 
 LOG = logging.getLogger(__name__)
 
@@ -902,16 +903,13 @@ class BaseVD(object):
                                                       enforce_multipath)
         attach_info, volume = self._attach_volume(context, volume, properties)
 
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
-
         try:
-            image_utils.upload_volume(context,
+            volume_utils.upload_volume(context,
                                        image_service,
                                        image_meta,
                                        attach_info['device']['path'],
-                                       compress=True,
-                                       store_id=store_id)
+                                       volume,
+                                       compress=True)
         finally:
             # Since attached volume was not used for writing we can force
             # detach it
@@ -1244,14 +1244,12 @@ class VxFlexOSDriver(driver.VolumeDriver):
                   "service": image_service,
                   "meta": image_meta,
                   })
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
         try:
-            image_utils.upload_volume(context,
+            volume_utils.upload_volume(context,
                                        image_service,
                                        image_meta,
                                        self._sio_attach_volume(volume),
-                                       store_id=store_id)
+                                       volume)
         finally:
             self._sio_detach_volume(volume)
 
@@ -994,13 +994,11 @@ class GPFSDriver(driver.CloneableImageVD,
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         """Copy the volume to the specified image."""
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
-
-        image_utils.upload_volume(context,
+        volume_utils.upload_volume(context,
                                    image_service,
                                    image_meta,
                                    self.local_path(volume),
-                                   store_id=store_id)
+                                   volume)
 
     def _migrate_volume(self, volume, host):
         """Migrate vol if source and dest are managed by same GPFS cluster."""
@@ -33,6 +33,7 @@ from cinder.image import image_utils
 from cinder import interface
 from cinder.volume import configuration
 from cinder.volume import driver
+from cinder.volume import volume_utils
 
 try:
     import linstor
@@ -662,13 +663,13 @@ class LinstorBaseDriver(driver.VolumeDriver):
         return lin_drv.all_api_responses_success(api_response)
 
     def _copy_vol_to_image(self, context, image_service, image_meta, rsc_path,
-                           store_id=None):
+                           volume):
 
-        return image_utils.upload_volume(context,
+        return volume_utils.upload_volume(context,
                                           image_service,
                                           image_meta,
                                           rsc_path,
-                                          store_id=store_id)
+                                          volume)
 
     #
     # Snapshot
@@ -980,13 +981,11 @@ class LinstorBaseDriver(driver.VolumeDriver):
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume)
         rsc_path = str(self._get_rsc_path(full_rsc_name))
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
         self._copy_vol_to_image(context,
                                 image_service,
                                 image_meta,
                                 rsc_path,
-                                store_id=store_id)
+                                volume)
         return {}
 
     # Not supported currently
@@ -524,14 +524,11 @@ class LVMVolumeDriver(driver.VolumeDriver):
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         """Copy the volume to the specified image."""
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
-
-        image_utils.upload_volume(context,
+        volume_utils.upload_volume(context,
                                    image_service,
                                    image_meta,
                                    self.local_path(volume),
-                                   store_id=store_id)
+                                   volume)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
@@ -1626,9 +1626,6 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
                 volume_id=volume.id)
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
-
         tmp_dir = volume_utils.image_conversion_dir()
         tmp_file = os.path.join(tmp_dir,
                                 volume.name + '-' + image_meta['id'])
@@ -1638,9 +1635,9 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
                 volume.name, tmp_file]
         args.extend(self._ceph_args())
         self._try_execute(*args)
-        image_utils.upload_volume(context, image_service,
+        volume_utils.upload_volume(context, image_service,
                                    image_meta, tmp_file,
-                                   store_id=store_id)
+                                   volume)
         os.unlink(tmp_file)
 
     def extend_volume(self, volume, new_size):
@@ -474,13 +474,12 @@ class RemoteFSDriver(driver.BaseVD):
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         """Copy the volume to the specified image."""
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
-
-        image_utils.upload_volume(context,
+        volume_utils.upload_volume(context,
                                    image_service,
                                    image_meta,
                                    self.local_path(volume),
-                                   run_as_root=self._execute_as_root,
-                                   store_id=store_id)
+                                   volume,
+                                   run_as_root=self._execute_as_root)
 
     def _read_config_file(self, config_file):
         # Returns list of lines in file
@@ -975,15 +974,12 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
         else:
             upload_path = active_file_path
 
-        if not store_id:
-            store_id = volume.volume_type.extra_specs.get(
-                'image_service:store_id')
-        image_utils.upload_volume(context,
+        volume_utils.upload_volume(context,
                                    image_service,
                                    image_meta,
                                    upload_path,
-                                   run_as_root=self._execute_as_root,
-                                   store_id=store_id)
+                                   volume,
+                                   run_as_root=self._execute_as_root)
 
     def get_active_image_from_info(self, volume):
         """Returns filename of the active image from the info file."""
@@ -26,6 +26,7 @@ from cinder.image import image_utils
 from cinder import interface
 from cinder import utils
 from cinder.volume import driver
+from cinder.volume import volume_utils
 
 LOG = logging.getLogger(__name__)
 
@@ -358,8 +359,6 @@ class SPDKDriver(driver.VolumeDriver):
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         """Copy the volume to the specified image."""
-        # retrieve store information from extra-specs
-        store_id = volume.volume_type.extra_specs.get('image_service:store_id')
         volume['provider_location'] = (
             self.create_export(context, volume, None)['provider_location'])
         connection_data = self.initialize_connection(volume, None)['data']
@@ -376,12 +375,11 @@ class SPDKDriver(driver.VolumeDriver):
         connection_data['device_path'] = device_info['path']
 
         try:
-            image_utils.upload_volume(context,
+            volume_utils.upload_volume(context,
                                        image_service,
                                        image_meta,
                                        device_info['path'],
-                                       store_id=store_id)
-
+                                       volume)
         finally:
             target_connector.disconnect_volume(connection_data, volume)
 
@@ -33,6 +33,7 @@ from cinder import interface
 from cinder.volume.drivers.vmware import datastore as hub
 from cinder.volume.drivers.vmware import vmdk
 from cinder.volume.drivers.vmware import volumeops as vops
+from cinder.volume import volume_utils
 
 
 LOG = logging.getLogger(__name__)
@@ -262,6 +263,9 @@ class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
         store_id = volume.volume_type.extra_specs.get(
             'image_service:store_id')
 
+        # TODO (whoami-rajat): Remove store_id and base_image_ref
+        # parameters when oslo.vmware calls volume_utils wrapper of
+        # upload_volume instead of image_utils.upload_volume
         image_transfer.upload_image(
             context,
             conf.vmware_image_transfer_timeout_secs,
@@ -275,7 +279,8 @@ class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
                 vmdk_file_path=vmdk_file_path,
                 vmdk_size=volume.size * units.Gi,
                 image_name=image_meta['name'],
-                store_id=store_id)
+                store_id=store_id,
+                base_image_ref=volume_utils.get_base_image_ref(volume))
         finally:
             if attached:
                 self.volumeops.detach_fcd(backing, fcd_loc)
@@ -47,6 +47,7 @@ from cinder.volume.drivers.vmware import datastore as hub
 from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
 from cinder.volume.drivers.vmware import volumeops
 from cinder.volume import volume_types
+from cinder.volume import volume_utils
 
 LOG = logging.getLogger(__name__)
 
@@ -1539,6 +1540,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
         # retrieve store information from extra-specs
         store_id = volume.volume_type.extra_specs.get('image_service:store_id')
 
+        # TODO (whoami-rajat): Remove store_id and base_image_ref
+        # parameters when oslo.vmware calls volume_utils wrapper of
+        # upload_volume instead of image_utils.upload_volume
         image_transfer.upload_image(context,
                                     timeout,
                                     image_service,
@@ -1552,7 +1556,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
                                     vmdk_size=volume['size'] * units.Gi,
                                     image_name=image_meta['name'],
                                     image_version=1,
-                                    store_id=store_id)
+                                    store_id=store_id,
+                                    base_image_ref=
+                                    volume_utils.get_base_image_ref(volume))
         LOG.info("Done copying volume %(vol)s to a new image %(img)s",
                  {'vol': volume['name'], 'img': image_meta['name']})
 
|
@ -285,8 +285,6 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
|
|||||||
|
|
||||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||||
"""Copy the volume to the specified image."""
|
"""Copy the volume to the specified image."""
|
||||||
# retrieve store information from extra-specs
|
|
||||||
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
|
|
||||||
disk_format = self._tgt_utils.get_supported_disk_format()
|
disk_format = self._tgt_utils.get_supported_disk_format()
|
||||||
temp_vhd_path = os.path.join(CONF.image_conversion_dir,
|
temp_vhd_path = os.path.join(CONF.image_conversion_dir,
|
||||||
str(image_meta['id']) + '.' + disk_format)
|
str(image_meta['id']) + '.' + disk_format)
|
||||||
@ -296,9 +294,9 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
|
|||||||
# qemu-img cannot access VSS snapshots, for which reason it
|
# qemu-img cannot access VSS snapshots, for which reason it
|
||||||
# must be exported first.
|
# must be exported first.
|
||||||
self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path)
|
self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path)
|
||||||
image_utils.upload_volume(context, image_service, image_meta,
|
volume_utils.upload_volume(
|
||||||
temp_vhd_path, 'vhd',
|
context, image_service, image_meta, temp_vhd_path, volume,
|
||||||
store_id=store_id)
|
'vhd')
|
||||||
finally:
|
finally:
|
||||||
fileutils.delete_if_exists(temp_vhd_path)
|
fileutils.delete_if_exists(temp_vhd_path)
|
||||||
|
|
||||||
|
@ -35,6 +35,7 @@ from cinder import objects
|
|||||||
from cinder import utils
|
from cinder import utils
|
||||||
from cinder.volume import configuration
|
from cinder.volume import configuration
|
||||||
from cinder.volume.drivers import remotefs as remotefs_drv
|
from cinder.volume.drivers import remotefs as remotefs_drv
|
||||||
|
from cinder.volume import volume_utils
|
||||||
|
|
||||||
VERSION = '1.1.0'
|
VERSION = '1.1.0'
|
||||||
|
|
||||||
@ -553,8 +554,6 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
|
|||||||
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
|
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
|
||||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||||
"""Copy the volume to the specified image."""
|
"""Copy the volume to the specified image."""
|
||||||
# retrieve store information from extra-specs
|
|
||||||
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
|
|
||||||
|
|
||||||
# If snapshots exist, flatten to a temporary image, and upload it
|
# If snapshots exist, flatten to a temporary image, and upload it
|
||||||
|
|
||||||
@ -580,12 +579,12 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
|
|||||||
else:
|
else:
|
||||||
upload_path = active_file_path
|
upload_path = active_file_path
|
||||||
|
|
||||||
image_utils.upload_volume(context,
|
volume_utils.upload_volume(context,
|
||||||
image_service,
|
image_service,
|
||||||
image_meta,
|
image_meta,
|
||||||
upload_path,
|
upload_path,
|
||||||
root_file_fmt,
|
volume,
|
||||||
store_id=store_id)
|
root_file_fmt)
|
||||||
finally:
|
finally:
|
||||||
if temp_path:
|
if temp_path:
|
||||||
self._delete(temp_path)
|
self._delete(temp_path)
|
||||||
|
@ -51,6 +51,7 @@ from cinder import context
|
|||||||
from cinder import db
|
from cinder import db
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _
|
from cinder.i18n import _
|
||||||
|
from cinder.image import image_utils
|
||||||
from cinder import objects
|
from cinder import objects
|
||||||
from cinder.objects import fields
|
from cinder.objects import fields
|
||||||
from cinder import rpc
|
from cinder import rpc
|
||||||
@ -1252,3 +1253,32 @@ def update_backup_error(backup, err, status=fields.BackupStatus.ERROR):
|
|||||||
backup.status = status
|
backup.status = status
|
||||||
backup.fail_reason = err
|
backup.fail_reason = err
|
||||||
backup.save()
|
backup.save()
|
||||||
|
|
||||||
|
|
||||||
|
# TODO (whoami-rajat): Remove this method when oslo.vmware calls volume_utils
|
||||||
|
# wrapper of upload_volume instead of image_utils.upload_volume
|
||||||
|
def get_base_image_ref(volume):
|
||||||
|
# This method fetches the image_id from volume glance metadata and pass
|
||||||
|
# it to the driver calling it during upload volume to image operation
|
||||||
|
base_image_ref = None
|
||||||
|
if volume.glance_metadata:
|
||||||
|
base_image_ref = volume.glance_metadata.get('image_id')
|
||||||
|
return base_image_ref
|
||||||
|
|
||||||
|
|
||||||
|
def upload_volume(context, image_service, image_meta, volume_path,
|
||||||
|
volume, volume_format='raw', run_as_root=True,
|
||||||
|
compress=True):
|
||||||
|
# retrieve store information from extra-specs
|
||||||
|
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
|
||||||
|
|
||||||
|
# This fetches the image_id from volume glance metadata and pass
|
||||||
|
# it to the driver calling it during upload volume to image operation
|
||||||
|
base_image_ref = None
|
||||||
|
if volume.glance_metadata:
|
||||||
|
base_image_ref = volume.glance_metadata.get('image_id')
|
||||||
|
image_utils.upload_volume(context, image_service, image_meta, volume_path,
|
||||||
|
volume_format=volume_format,
|
||||||
|
run_as_root=run_as_root,
|
||||||
|
compress=compress, store_id=store_id,
|
||||||
|
base_image_ref=base_image_ref)
|
||||||
|
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    This release includes support for Glance automatic image colocation.
+    When a volume originally created from an image is uploaded to the
+    Image service, Cinder passes Glance a reference to the original
+    image. Glance may use this information to colocate the new image data
+    in the same image store(s) as the original image data. Consult the
+    Glance documentation for more information.
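
For reference, the colocation hint travels to Glance as an HTTP header on the upload request, next to the existing store hint. Below is a minimal sketch of that mechanism, modeled on the GlanceClientWrapper.call() logic in this change; the fake client classes, the helper name apply_colocation_headers, and the sample values are illustrative assumptions, not part of the commit.

# Sketch only: mirrors the keys/values zip from GlanceClientWrapper.call().
# The two header names are the ones this change uses; everything else here
# (fake client, sample ids) is made up for illustration.


class FakeHTTPClient(object):
    def __init__(self):
        self.additional_headers = {}


class FakeClient(object):
    def __init__(self):
        self.http_client = FakeHTTPClient()


def apply_colocation_headers(client, store_id=None, base_image_ref=None):
    # Only headers that actually carry a value are attached to the request.
    keys = ('x-image-meta-store', 'x-openstack-base-image-ref',)
    values = (store_id, base_image_ref,)
    headers = {k: v for (k, v) in zip(keys, values) if v is not None}
    if headers:
        client.http_client.additional_headers = headers
    return headers


client = FakeClient()
# base_image_ref is the 'image_id' kept in the volume's glance metadata,
# which is what volume_utils.get_base_image_ref() returns in the diff above.
print(apply_colocation_headers(client,
                               store_id='reliable-store',
                               base_image_ref='original-image-uuid'))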