Remove log translations
Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I4c96f3590d46205c45d12ee4ead8c208e11c52c5
parent 3a6c184d52
commit a55a6b5c71
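The change is mechanical throughout the diff below: the _LE, _LI, and _LW wrappers are dropped from LOG calls, while the lazy _() marker is kept for user-facing exception messages. A minimal before/after sketch of the pattern (the backup_id variable and the message text here are illustrative only, not taken from any single file below):

# Before: log message wrapped in a translation marker.
from cinder.i18n import _, _LI

LOG.info(_LI("Delete backup with id: %s"), backup_id)
raise exception.InvalidInput(reason=_("Invalid backup id."))

# After: the log message is a plain string; _() stays for exception text.
from cinder.i18n import _

LOG.info("Delete backup with id: %s", backup_id)
raise exception.InvalidInput(reason=_("Invalid backup id."))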
@@ -19,8 +19,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import paste.urlmap
 
-from cinder.i18n import _LW
-
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -28,7 +26,7 @@ LOG = logging.getLogger(__name__)
 
 def root_app_factory(loader, global_conf, **local_conf):
     if CONF.enable_v1_api:
-        LOG.warning(_LW('The v1 api is deprecated and is not under active '
+        LOG.warning('The v1 api is deprecated and is not under active '
                     'development. You should set enable_v1_api=false '
-                    'and enable_v3_api=true in your cinder.conf file.'))
+                    'and enable_v3_api=true in your cinder.conf file.')
     return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
@@ -28,7 +28,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.views import backups as backup_views
 from cinder import backup as backupAPI
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import utils
 
 LOG = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ class BackupsController(wsgi.Controller):
         LOG.debug('Delete called for member %s.', id)
         context = req.environ['cinder.context']
 
-        LOG.info(_LI('Delete backup with id: %s'), id)
+        LOG.info('Delete backup with id: %s', id)
 
         try:
             backup = self.backup_api.get(context, id)
@@ -141,8 +141,8 @@ class BackupsController(wsgi.Controller):
         incremental = backup.get('incremental', False)
         force = backup.get('force', False)
         snapshot_id = backup.get('snapshot_id', None)
-        LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
-                     " %(container)s"),
+        LOG.info("Creating backup of volume %(volume_id)s in container"
+                 " %(container)s",
                  {'volume_id': volume_id, 'container': container},
                  context=context)
 
@@ -173,7 +173,7 @@ class BackupsController(wsgi.Controller):
         volume_id = restore.get('volume_id', None)
         name = restore.get('name', None)
 
-        LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"),
+        LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s",
                  {'backup_id': id, 'volume_id': volume_id},
                  context=context)
 
@@ -28,7 +28,7 @@ from cinder.api.views import cgsnapshots as cgsnapshot_views
 from cinder import consistencygroup as consistencygroup_api
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder.objects import cgsnapshot as cgsnap_obj
 from cinder.objects import consistencygroup as cg_obj
 from cinder.objects import group as grp_obj
@@ -62,7 +62,7 @@ class CgsnapshotsController(wsgi.Controller):
         LOG.debug('delete called for member %s', id)
         context = req.environ['cinder.context']
 
-        LOG.info(_LI('Delete cgsnapshot with id: %s'), id)
+        LOG.info('Delete cgsnapshot with id: %s', id)
 
         try:
             cgsnapshot = self._get_cgsnapshot(context, id)
@@ -167,7 +167,7 @@ class CgsnapshotsController(wsgi.Controller):
         name = cgsnapshot.get('name', None)
         description = cgsnapshot.get('description', None)
 
-        LOG.info(_LI("Creating cgsnapshot %(name)s."),
+        LOG.info("Creating cgsnapshot %(name)s.",
                  {'name': name},
                  context=context)
 
@@ -28,7 +28,7 @@ from cinder.api.views import consistencygroups as consistencygroup_views
 from cinder import consistencygroup as consistencygroup_api
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder.objects import cgsnapshot as cgsnap_obj
 from cinder.objects import consistencygroup as cg_obj
 from cinder.objects import group as grp_obj
@@ -77,7 +77,7 @@ class ConsistencyGroupsController(wsgi.Controller):
             msg = _("Invalid value '%s' for force.") % force
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.info(_LI('Delete consistency group with id: %s'), id)
+        LOG.info('Delete consistency group with id: %s', id)
 
         try:
             group = self._get(context, id)
@@ -181,7 +181,7 @@ class ConsistencyGroupsController(wsgi.Controller):
                    group_types.DEFAULT_CGSNAPSHOT_TYPE)
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.info(_LI("Creating consistency group %(name)s."),
+        LOG.info("Creating consistency group %(name)s.",
                  {'name': name})
 
         try:
@@ -232,12 +232,12 @@ class ConsistencyGroupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=msg)
 
         if cgsnapshot_id:
-            LOG.info(_LI("Creating consistency group %(name)s from "
-                         "cgsnapshot %(snap)s."),
+            LOG.info("Creating consistency group %(name)s from "
+                     "cgsnapshot %(snap)s.",
                      {'name': name, 'snap': cgsnapshot_id})
         elif source_cgid:
-            LOG.info(_LI("Creating consistency group %(name)s from "
-                         "source consistency group %(source_cgid)s."),
+            LOG.info("Creating consistency group %(name)s from "
+                     "source consistency group %(source_cgid)s.",
                      {'name': name, 'source_cgid': source_cgid})
 
         try:
@@ -282,9 +282,9 @@ class ConsistencyGroupsController(wsgi.Controller):
     def _update(self, context, id, name, description, add_volumes,
                 remove_volumes,
                 allow_empty=False):
-        LOG.info(_LI("Updating consistency group %(id)s with name %(name)s "
+        LOG.info("Updating consistency group %(id)s with name %(name)s "
                  "description: %(description)s add_volumes: "
-                 "%(add_volumes)s remove_volumes: %(remove_volumes)s."),
+                 "%(add_volumes)s remove_volumes: %(remove_volumes)s.",
                  {'id': id,
                   'name': name,
                   'description': description,
@@ -25,7 +25,7 @@ from cinder.api.openstack import wsgi
 from cinder.common import constants
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.volume import api as volume_api
 
@@ -120,7 +120,7 @@ class HostController(wsgi.Controller):
         """Sets the specified host's ability to accept new volumes."""
         context = req.environ['cinder.context']
         state = "enabled" if enabled else "disabled"
-        LOG.info(_LI("Setting host %(host)s to %(state)s."),
+        LOG.info("Setting host %(host)s to %(state)s.",
                  {'host': host, 'state': state})
         result = self.api.set_host_enabled(context,
                                            host=host,
@@ -18,7 +18,7 @@ import webob
 
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
 
@@ -94,7 +94,7 @@ class SnapshotActionsController(wsgi.Controller):
 
             update_dict.update({'progress': progress})
 
-        LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
+        LOG.info("Updating snapshot %(id)s with info %(dict)s",
                  {'id': id, 'dict': update_dict})
 
         current_snapshot.update(update_dict)
@@ -20,7 +20,6 @@ from webob import exc
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _LI
 from cinder import volume
 
 LOG = logging.getLogger(__name__)
@@ -49,7 +48,7 @@ class SnapshotUnmanageController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context)
 
-        LOG.info(_LI("Unmanage snapshot with id: %s"), id)
+        LOG.info("Unmanage snapshot with id: %s", id)
 
         try:
             snapshot = self.volume_api.get_snapshot(context, id)
@@ -27,7 +27,7 @@ from cinder.api.openstack import wsgi
 from cinder import context as ctxt
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 from cinder import rpc
 from cinder import utils
 from cinder.volume import volume_types
@@ -80,10 +80,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
                 expl = _('Volume Type is currently in use.')
                 raise webob.exc.HTTPBadRequest(explanation=expl)
             else:
-                msg = _LW("The option 'allow_inuse_volume_type_modification' "
+                msg = ("The option 'allow_inuse_volume_type_modification' "
                        "is deprecated and will be removed in a future "
                        "release. The default behavior going forward will "
                        "be to disallow modificaton of in-use types.")
                 versionutils.report_deprecated_feature(LOG, msg)
                 return
 
@@ -23,7 +23,7 @@ from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder.api.views import transfers as transfer_view
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import transfer as transferAPI
 
 LOG = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ class VolumeTransferController(wsgi.Controller):
                                                     remove_whitespaces=True)
             name = name.strip()
 
-        LOG.info(_LI("Creating transfer of volume %s"),
+        LOG.info("Creating transfer of volume %s",
                  volume_id)
 
         try:
@@ -124,7 +124,7 @@ class VolumeTransferController(wsgi.Controller):
             msg = _("Incorrect request body format")
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.info(_LI("Accepting transfer %s"), transfer_id)
+        LOG.info("Accepting transfer %s", transfer_id)
 
         try:
             accepted_transfer = self.transfer_api.accept(context, transfer_id,
@@ -144,7 +144,7 @@ class VolumeTransferController(wsgi.Controller):
         """Delete a transfer."""
         context = req.environ['cinder.context']
 
-        LOG.info(_LI("Delete transfer with id: %s"), id)
+        LOG.info("Delete transfer with id: %s", id)
 
         # Not found exception will be handled at the wsgi level
         self.transfer_api.delete(context, transfer_id=id)
@@ -18,7 +18,6 @@ import webob
 
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
-from cinder.i18n import _LI
 from cinder import volume
 
 LOG = logging.getLogger(__name__)
@@ -50,7 +49,7 @@ class VolumeUnmanageController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context)
 
-        LOG.info(_LI("Unmanage volume with id: %s"), id)
+        LOG.info("Unmanage volume with id: %s", id)
 
         # Not found exception will be handled at the wsgi level
         vol = self.volume_api.get(context, id)
@@ -25,7 +25,6 @@ import webob.exc
 import cinder.api.openstack
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _LE, _LI, _LW
 import cinder.policy
 
 
@@ -123,7 +122,7 @@ class ExtensionManager(object):
     """
 
     def __init__(self):
-        LOG.info(_LI('Initializing extension manager.'))
+        LOG.info('Initializing extension manager.')
 
         self.cls_list = CONF.osapi_volume_extension
         self.extensions = {}
@@ -138,7 +137,7 @@ class ExtensionManager(object):
             return
 
         alias = ext.alias
-        LOG.info(_LI('Loaded extension: %s'), alias)
+        LOG.info('Loaded extension: %s', alias)
 
         if alias in self.extensions:
             raise exception.Error("Found duplicate extension: %s" % alias)
@@ -182,7 +181,7 @@ class ExtensionManager(object):
                 ' '.join(extension.__doc__.strip().split()))
             LOG.debug('Ext updated: %s', extension.updated)
         except AttributeError:
-            LOG.exception(_LE("Exception loading extension."))
+            LOG.exception("Exception loading extension.")
             return False
 
         return True
@@ -214,8 +213,8 @@ class ExtensionManager(object):
             try:
                 self.load_extension(ext_factory)
             except Exception as exc:
-                LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
-                                '%(exc)s'),
+                LOG.warning('Failed to load extension %(ext_factory)s: '
+                            '%(exc)s',
                             {'ext_factory': ext_factory, 'exc': exc})
 
 
@@ -288,8 +287,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
             try:
                 ext_mgr.load_extension(classpath)
             except Exception as exc:
-                logger.warning(_LW('Failed to load extension %(classpath)s: '
-                                   '%(exc)s'),
+                logger.warning('Failed to load extension %(classpath)s: '
+                               '%(exc)s',
                                {'classpath': classpath, 'exc': exc})
 
     # Now, let's consider any subdirectories we may have...
@@ -313,8 +312,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
             try:
                 ext(ext_mgr)
             except Exception as exc:
-                logger.warning(_LW('Failed to load extension '
-                                   '%(ext_name)s: %(exc)s'),
+                logger.warning('Failed to load extension '
+                               '%(ext_name)s: %(exc)s',
                                {'ext_name': ext_name, 'exc': exc})
 
     # Update the list of directories we'll explore...
@@ -21,7 +21,7 @@ import webob.exc
 
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import utils
 from cinder.wsgi import common as base_wsgi
 
@@ -44,7 +44,7 @@ class FaultWrapper(base_wsgi.Middleware):
 
     def _error(self, inner, req):
         if not isinstance(inner, exception.QuotaError):
-            LOG.exception(_LE("Caught error: %(type)s %(error)s"),
+            LOG.exception("Caught error: %(type)s %(error)s",
                           {'type': type(inner),
                            'error': inner})
         safe = getattr(inner, 'safe', False)
@@ -54,7 +54,7 @@ class FaultWrapper(base_wsgi.Middleware):
             status = 500
 
         msg_dict = dict(url=req.url, status=status)
-        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
+        LOG.info("%(url)s returned with HTTP %(status)d", msg_dict)
         outer = self.status_to_type(status)
         if headers:
             outer.headers = headers
@@ -23,7 +23,7 @@ from oslo_service import wsgi as base_wsgi
 import routes
 
 from cinder.api.openstack import wsgi
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 
 
 LOG = logging.getLogger(__name__)
@@ -111,8 +111,8 @@ class APIRouter(base_wsgi.Router):
             controller = extension.controller
 
             if collection not in self.resources:
-                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
-                                'resource %(collection)s: No such resource'),
+                LOG.warning('Extension %(ext_name)s: Cannot extend '
+                            'resource %(collection)s: No such resource',
                             {'ext_name': extension.extension.name,
                              'collection': collection})
                 continue
@@ -32,7 +32,7 @@ from cinder.api.openstack import api_version_request as api_version
 from cinder.api.openstack import versioned_method
 from cinder import exception
 from cinder import i18n
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import policy
 from cinder import utils
 from cinder.wsgi import common as wsgi
@@ -602,15 +602,14 @@ class ResourceExceptionHandler(object):
                 code=ex_value.code, explanation=six.text_type(ex_value)))
         elif isinstance(ex_value, TypeError):
             exc_info = (ex_type, ex_value, ex_traceback)
-            LOG.error(_LE(
-                'Exception handling resource: %s'),
-                ex_value, exc_info=exc_info)
+            LOG.error('Exception handling resource: %s',
+                      ex_value, exc_info=exc_info)
             raise Fault(webob.exc.HTTPBadRequest())
         elif isinstance(ex_value, Fault):
-            LOG.info(_LI("Fault thrown: %s"), ex_value)
+            LOG.info("Fault thrown: %s", ex_value)
             raise ex_value
         elif isinstance(ex_value, webob.exc.HTTPException):
-            LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
+            LOG.info("HTTP exception thrown: %s", ex_value)
             raise Fault(ex_value)
 
         # We didn't handle the exception
@@ -812,7 +811,7 @@ class Resource(wsgi.Application):
     def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
 
-        LOG.info(_LI("%(method)s %(url)s"),
+        LOG.info("%(method)s %(url)s",
                  {"method": request.method,
                   "url": request.url})
 
@@ -934,10 +933,10 @@ class Resource(wsgi.Application):
 
         try:
             msg_dict = dict(url=request.url, status=response.status_int)
-            msg = _LI("%(url)s returned with HTTP %(status)d")
+            msg = "%(url)s returned with HTTP %(status)d"
         except AttributeError as e:
             msg_dict = dict(url=request.url, e=e)
-            msg = _LI("%(url)s returned a fault: %(e)s")
+            msg = "%(url)s returned a fault: %(e)s"
 
         LOG.info(msg, msg_dict)
 
@@ -972,7 +971,7 @@ class Resource(wsgi.Application):
                                     'create',
                                     'delete',
                                     'update']):
-                    LOG.exception(_LE('Get method error.'))
+                    LOG.exception('Get method error.')
                 else:
                     ctxt.reraise = False
         else:
@@ -25,7 +25,7 @@ from cinder.api import common
 from cinder.api.openstack import wsgi
 from cinder.api.views import snapshots as snapshot_views
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import utils
 from cinder import volume
 from cinder.volume import utils as volume_utils
@@ -58,7 +58,7 @@ class SnapshotsController(wsgi.Controller):
         """Delete a snapshot."""
         context = req.environ['cinder.context']
 
-        LOG.info(_LI("Delete snapshot with id: %s"), id)
+        LOG.info("Delete snapshot with id: %s", id)
 
         # Not found exception will be handled at the wsgi level
         snapshot = self.volume_api.get_snapshot(context, id)
@@ -127,8 +127,7 @@ class SnapshotsController(wsgi.Controller):
 
         volume = self.volume_api.get(context, volume_id)
         force = snapshot.get('force', False)
-        msg = _LI("Create snapshot from volume %s")
-        LOG.info(msg, volume_id)
+        LOG.info("Create snapshot from volume %s", volume_id)
         self.validate_name_and_description(snapshot)
 
         # NOTE(thingee): v2 API allows name instead of display_name
@@ -28,7 +28,7 @@ from cinder.api.v2.views import volumes as volume_views
 from cinder import consistencygroup as consistencygroupAPI
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder.image import glance
 from cinder import objects
 from cinder import utils
@@ -70,7 +70,7 @@ class VolumeController(wsgi.Controller):
 
         cascade = utils.get_bool_param('cascade', req.params)
 
-        LOG.info(_LI("Delete volume with id: %s"), id)
+        LOG.info("Delete volume with id: %s", id)
 
         # Not found exception will be handled at the wsgi level
         volume = self.volume_api.get(context, id)
@@ -257,7 +257,7 @@ class VolumeController(wsgi.Controller):
         elif size is None and kwargs['source_replica'] is not None:
             size = kwargs['source_replica']['size']
 
-        LOG.info(_LI("Create volume of %s GB"), size)
+        LOG.info("Create volume of %s GB", size)
 
         if self.ext_mgr.is_loaded('os-image-create'):
             image_ref = volume.get('imageRef')
@@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.v3.views import group_snapshots as group_snapshot_views
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import rpc
 from cinder.volume import group_types
 
@@ -72,7 +72,7 @@ class GroupSnapshotsController(wsgi.Controller):
         LOG.debug('delete called for member %s', id)
         context = req.environ['cinder.context']
 
-        LOG.info(_LI('Delete group_snapshot with id: %s'), id, context=context)
+        LOG.info('Delete group_snapshot with id: %s', id, context=context)
 
         try:
             group_snapshot = self.group_snapshot_api.get_group_snapshot(
@@ -160,7 +160,7 @@ class GroupSnapshotsController(wsgi.Controller):
         name = group_snapshot.get('name', None)
         description = group_snapshot.get('description', None)
 
-        LOG.info(_LI("Creating group_snapshot %(name)s."),
+        LOG.info("Creating group_snapshot %(name)s.",
                  {'name': name},
                  context=context)
 
@@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.v3.views import groups as views_groups
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import rpc
 from cinder.volume import group_types
 
@@ -134,7 +134,7 @@ class GroupsController(wsgi.Controller):
                    % del_vol)
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.info(_LI('Delete group with id: %s'), id,
+        LOG.info('Delete group with id: %s', id,
                  context=context)
 
         try:
@@ -217,7 +217,7 @@ class GroupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=msg)
         availability_zone = group.get('availability_zone')
 
-        LOG.info(_LI("Creating group %(name)s."),
+        LOG.info("Creating group %(name)s.",
                  {'name': name},
                  context=context)
 
@@ -268,16 +268,16 @@ class GroupsController(wsgi.Controller):
 
         group_type_id = None
         if group_snapshot_id:
-            LOG.info(_LI("Creating group %(name)s from group_snapshot "
-                         "%(snap)s."),
+            LOG.info("Creating group %(name)s from group_snapshot "
+                     "%(snap)s.",
                      {'name': name, 'snap': group_snapshot_id},
                      context=context)
             grp_snap = self.group_api.get_group_snapshot(context,
                                                          group_snapshot_id)
             group_type_id = grp_snap.group_type_id
         elif source_group_id:
-            LOG.info(_LI("Creating group %(name)s from "
-                         "source group %(source_group_id)s."),
+            LOG.info("Creating group %(name)s from "
+                     "source group %(source_group_id)s.",
                      {'name': name, 'source_group_id': source_group_id},
                      context=context)
             source_group = self.group_api.get(context, source_group_id)
@@ -341,9 +341,9 @@ class GroupsController(wsgi.Controller):
                     "can not be all empty in the request body.")
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.info(_LI("Updating group %(id)s with name %(name)s "
+        LOG.info("Updating group %(id)s with name %(name)s "
                  "description: %(description)s add_volumes: "
-                 "%(add_volumes)s remove_volumes: %(remove_volumes)s."),
+                 "%(add_volumes)s remove_volumes: %(remove_volumes)s.",
                  {'id': id, 'name': name,
                   'description': description,
                   'add_volumes': add_volumes,
@@ -25,8 +25,8 @@ from cinder.api.v2 import volumes as volumes_v2
 from cinder.api.v3.views import volumes as volume_views_v3
 from cinder import exception
 from cinder import group as group_api
+from cinder.i18n import _
 from cinder import objects
-from cinder.i18n import _, _LI
 import cinder.policy
 from cinder import utils
 
@@ -70,8 +70,8 @@ class VolumeController(volumes_v2.VolumeController):
         params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade,
                                                      'f': force}
 
-        msg = _LI("Delete volume with id: %(id)s %(params)s")
-        LOG.info(msg, {'id': id, 'params': params}, context=context)
+        LOG.info("Delete volume with id: %(id)s %(params)s",
+                 {'id': id, 'params': params}, context=context)
 
         if force:
             check_policy(context, 'force_delete')
@@ -264,7 +264,7 @@ class VolumeController(volumes_v2.VolumeController):
         elif size is None and kwargs['source_replica'] is not None:
             size = kwargs['source_replica']['size']
 
-        LOG.info(_LI("Create volume of %s GB"), size)
+        LOG.info("Create volume of %s GB", size)
 
         if self.ext_mgr.is_loaded('os-image-create'):
             image_ref = volume.get('imageRef')
@@ -33,7 +33,7 @@ from cinder.common import constants
 from cinder import context
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
 import cinder.policy
@@ -354,8 +354,8 @@ class API(base.Base):
 
             description = 'auto-created_from_restore_from_backup'
 
-            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
-                         "backup %(backup_id)s."),
+            LOG.info("Creating volume of %(size)s GB for restore of "
+                     "backup %(backup_id)s.",
                      {'size': size, 'backup_id': backup_id})
             volume = self.volume_api.create(context, size, name, description)
             volume_id = volume['id']
@@ -380,8 +380,8 @@ class API(base.Base):
                        {'volume_size': volume['size'], 'size': size})
                 raise exception.InvalidVolume(reason=msg)
 
-            LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
-                         "backup %(backup_id)s"),
+            LOG.info("Overwriting volume %(volume_id)s with restore of "
+                     "backup %(backup_id)s",
                      {'volume_id': volume_id, 'backup_id': backup_id})
 
         # Setting the status here rather than setting at start and unrolling
@@ -36,7 +36,7 @@ import six
 
 from cinder.backup import driver
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
 from cinder.volume import utils as volume_utils
@@ -572,10 +572,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
         try:
             self._backup_metadata(backup, object_meta)
         # Whatever goes wrong, we want to log, cleanup, and re-raise.
-        except Exception as err:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Backup volume metadata failed: %s."),
-                              err)
+                LOG.exception("Backup volume metadata failed.")
                 self.delete(backup)
 
         self._finalize_backup(backup, container, object_meta, object_sha256)
@@ -635,9 +634,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
         try:
             fileno = volume_file.fileno()
         except IOError:
-            LOG.info(_LI("volume_file does not support "
-                         "fileno() so skipping "
-                         "fsync()"))
+            LOG.info("volume_file does not support fileno() so skipping "
+                     "fsync()")
         else:
             os.fsync(fileno)
 
@@ -722,8 +720,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
         try:
             object_names = self._generate_object_names(backup)
         except Exception:
-            LOG.warning(_LW('Error while listing objects, continuing'
-                            ' with delete.'))
+            LOG.warning('Error while listing objects, continuing'
+                        ' with delete.')
 
         for object_name in object_names:
             self.delete_object(container, object_name)
@@ -24,7 +24,7 @@ import six
 
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LI, _LW
+from cinder.i18n import _
 from cinder import keymgr as key_manager
 
 service_opts = [
@@ -64,7 +64,7 @@ class BackupMetadataAPI(base.Base):
         try:
             jsonutils.dumps(value)
         except TypeError:
-            LOG.info(_LI("Value with type=%s is not serializable"),
+            LOG.info("Value with type=%s is not serializable",
                      type(value))
             return False
 
@@ -84,8 +84,8 @@ class BackupMetadataAPI(base.Base):
         for key, value in meta:
             # Exclude fields that are "not JSON serializable"
             if not self._is_serializable(value):
-                LOG.info(_LI("Unable to serialize field '%s' - excluding "
-                             "from backup"), key)
+                LOG.info("Unable to serialize field '%s' - excluding "
+                         "from backup", key)
                 continue
             # Copy the encryption key uuid for backup
             if key is 'encryption_key_id' and value is not None:
@@ -112,8 +112,8 @@ class BackupMetadataAPI(base.Base):
         for entry in meta:
             # Exclude fields that are "not JSON serializable"
             if not self._is_serializable(meta[entry]):
-                LOG.info(_LI("Unable to serialize field '%s' - excluding "
-                             "from backup"), entry)
+                LOG.info("Unable to serialize field '%s' - excluding "
+                         "from backup", entry)
                 continue
             container[type_tag][entry] = meta[entry]
 
@@ -136,8 +136,8 @@ class BackupMetadataAPI(base.Base):
         for entry in meta:
             # Exclude fields that are "not JSON serializable"
             if not self._is_serializable(entry.value):
-                LOG.info(_LI("Unable to serialize field '%s' - "
-                             "excluding from backup"), entry)
+                LOG.info("Unable to serialize field '%s' - "
+                         "excluding from backup", entry)
                 continue
             container[type_tag][entry.key] = entry.value
 
@@ -234,9 +234,9 @@ class BackupMetadataAPI(base.Base):
                 else:
                     # Volume type id's do not match, and destination volume
                     # has a volume type. Throw exception.
-                    LOG.warning(_LW("Destination volume type is different from "
+                    LOG.warning("Destination volume type is different from "
                                 "source volume type for an encrypted volume. "
-                                "Encrypted backup restore has failed."))
+                                "Encrypted backup restore has failed.")
                     msg = (_("The source volume type '%(src)s' is different "
                              "than the destination volume type '%(dest)s'.") %
                            {'src': src_volume_type_id,
@@ -58,7 +58,7 @@ from six.moves import range
 
 from cinder.backup import driver
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import interface
 from cinder import utils
 import cinder.volume.drivers.rbd as rbd_driver
@@ -181,8 +181,8 @@ class CephBackupDriver(driver.BackupDriver):
             self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
             self.rbd_stripe_count = CONF.backup_ceph_stripe_count
         else:
-            LOG.info(_LI("RBD striping not supported - ignoring configuration "
-                         "settings for rbd striping"))
+            LOG.info("RBD striping not supported - ignoring configuration "
+                     "settings for rbd striping.")
             self.rbd_stripe_count = 0
             self.rbd_stripe_unit = 0
 
@@ -258,8 +258,8 @@ class CephBackupDriver(driver.BackupDriver):
             # moved to the driver's initialization so that it can stop
             # the service from starting when the underyling RBD does not
             # support the requested features.
-            LOG.error(_LE("RBD journaling not supported - unable to "
-                          "support per image mirroring in backup pool"))
+            LOG.error("RBD journaling not supported - unable to "
+                      "support per image mirroring in backup pool")
             raise exception.BackupInvalidCephArgs(
                 _("Image Journaling set but RBD backend does "
                   "not support journaling")
@@ -468,14 +468,14 @@ class CephBackupDriver(driver.BackupDriver):
                                                backup.id)
             if rem:
                 LOG.info(
-                    _LI("Backup base image of volume %(volume)s still "
+                    "Backup base image of volume %(volume)s still "
                     "has %(snapshots)s snapshots so skipping base "
-                        "image delete."),
+                    "image delete.",
                     {'snapshots': rem, 'volume': volume_id})
                 return
 
-            LOG.info(_LI("Deleting backup base image='%(basename)s' of "
-                         "volume %(volume)s."),
+            LOG.info("Deleting backup base image='%(basename)s' of "
+                     "volume %(volume)s.",
                      {'basename': base_name, 'volume': volume_id})
             # Delete base if no more snapshots
             try:
@@ -483,17 +483,16 @@ class CephBackupDriver(driver.BackupDriver):
             except self.rbd.ImageBusy:
                 # Allow a retry if the image is busy
                 if retries > 0:
-                    LOG.info(_LI("Backup image of volume %(volume)s is "
+                    LOG.info("Backup image of volume %(volume)s is "
                              "busy, retrying %(retries)s more time(s) "
-                             "in %(delay)ss."),
+                             "in %(delay)ss.",
                              {'retries': retries,
                               'delay': delay,
                               'volume': volume_id})
                     eventlet.sleep(delay)
                 else:
-                    LOG.error(_LE("Max retries reached deleting backup "
-                                  "%(basename)s image of volume "
-                                  "%(volume)s."),
+                    LOG.error("Max retries reached deleting backup "
+                              "%(basename)s image of volume %(volume)s.",
                               {'volume': volume_id,
                                'basename': base_name})
                     raise
@@ -527,7 +526,7 @@ class CephBackupDriver(driver.BackupDriver):
             p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_LE("Pipe1 failed - %s "), e)
+            LOG.error("Pipe1 failed - %s ", e)
             raise
 
         # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
@@ -541,7 +540,7 @@ class CephBackupDriver(driver.BackupDriver):
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_LE("Pipe2 failed - %s "), e)
+            LOG.error("Pipe2 failed - %s ", e)
             raise
 
         p1.stdout.close()
@@ -1005,8 +1004,7 @@ class CephBackupDriver(driver.BackupDriver):
                                   dest_user=rbd_user, dest_conf=rbd_conf,
                                   src_snap=restore_point)
         except exception.BackupRBDOperationFailed:
-            LOG.exception(_LE("Differential restore failed, trying full "
-                              "restore"))
+            LOG.exception("Differential restore failed, trying full restore")
             raise
 
         # If the volume we are restoring to is larger than the backup volume,
@@ -1108,10 +1106,9 @@ class CephBackupDriver(driver.BackupDriver):
             else:
                 LOG.debug("Volume file is NOT RBD.")
         else:
-            LOG.info(_LI("No restore point found for backup="
-                         "'%(backup)s' of volume %(volume)s "
-                         "although base image is found - "
-                         "forcing full copy."),
+            LOG.info("No restore point found for backup='%(backup)s' of "
+                     "volume %(volume)s although base image is found - "
+                     "forcing full copy.",
                      {'backup': backup.id,
                       'volume': backup.volume_id})
         return False, restore_point
@@ -1196,8 +1193,8 @@ class CephBackupDriver(driver.BackupDriver):
             LOG.debug('Restore to volume %s finished successfully.',
                       volume_id)
         except exception.BackupOperationError as e:
-            LOG.error(_LE('Restore to volume %(volume)s finished with error - '
-                          '%(error)s.'), {'error': e, 'volume': volume_id})
+            LOG.error('Restore to volume %(volume)s finished with error - '
+                      '%(error)s.', {'error': e, 'volume': volume_id})
             raise
 
     def delete(self, backup):
@@ -1209,8 +1206,8 @@ class CephBackupDriver(driver.BackupDriver):
             self._try_delete_base_image(backup)
         except self.rbd.ImageNotFound:
             LOG.warning(
-                _LW("RBD image for backup %(backup)s of volume %(volume)s "
-                    "not found. Deleting backup metadata."),
+                "RBD image for backup %(backup)s of volume %(volume)s "
+                "not found. Deleting backup metadata.",
                 {'backup': backup.id, 'volume': backup.volume_id})
             delete_failed = True
 
@@ -1218,9 +1215,8 @@ class CephBackupDriver(driver.BackupDriver):
             VolumeMetadataBackup(client, backup.id).remove_if_exists()
 
         if delete_failed:
-            LOG.info(_LI("Delete of backup '%(backup)s' "
-                         "for volume '%(volume)s' "
-                         "finished with warning."),
+            LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' "
+                     "finished with warning.",
                      {'backup': backup.id, 'volume': backup.volume_id})
         else:
             LOG.debug("Delete of backup '%(backup)s' for volume "
@@ -55,7 +55,6 @@ from swiftclient import client as swift
 from cinder.backup import chunkeddriver
 from cinder import exception
 from cinder.i18n import _
-from cinder.i18n import _LE
 from cinder import interface
 
 LOG = logging.getLogger(__name__)
@@ -215,8 +214,8 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
         self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure
         if CONF.backup_swift_auth == 'single_user':
             if CONF.backup_swift_user is None:
-                LOG.error(_LE("single_user auth mode enabled, "
-                              "but %(param)s not set"),
+                LOG.error("single_user auth mode enabled, "
+                          "but %(param)s not set",
                           {'param': 'backup_swift_user'})
                 raise exception.ParameterNotFound(param='backup_swift_user')
             os_options = {}
@@ -35,7 +35,7 @@ from oslo_log import log as logging
 
 from cinder.backup import driver
 from cinder import exception
-from cinder.i18n import _LE, _
+from cinder.i18n import _
 from cinder import interface
 from cinder import utils
 
@@ -250,9 +250,9 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
                       hardlink_path,
                       run_as_root=True)
    except processutils.ProcessExecutionError as exc:
-        LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
+        LOG.error('backup: %(vol_id)s failed to remove backup hardlink '
                   'from %(vpath)s to %(bpath)s.\n'
-                  'stdout: %(out)s\n stderr: %(err)s.'),
+                  'stdout: %(out)s\n stderr: %(err)s.',
                   {'vol_id': volume_id,
                    'vpath': volume_path,
                    'bpath': hardlink_path,
@@ -523,8 +523,8 @@ class TSMBackupDriver(driver.BackupDriver):
            # log error if tsm cannot delete the backup object
            # but do not raise exception so that cinder backup
            # object can be removed.
-            LOG.error(_LE('delete: %(vol_id)s failed with '
-                          'stdout: %(out)s\n stderr: %(err)s'),
+            LOG.error('delete: %(vol_id)s failed with '
+                      'stdout: %(out)s\n stderr: %(err)s',
                      {'vol_id': backup.volume_id,
                       'out': out,
                       'err': err})
@@ -42,7 +42,7 @@ from cinder.backup import driver
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import manager
 from cinder import objects
 from cinder.objects import fields
@@ -117,7 +117,7 @@ class BackupManager(manager.ThreadPoolManager):
                 LOG.debug("Got backend '%s'.", backend)
                 return backend

-        LOG.info(_LI("Backend not found in hostname (%s) so using default."),
+        LOG.info("Backend not found in hostname (%s) so using default.",
                  host)

         if 'default' not in self.volume_managers:
@@ -168,15 +168,15 @@ class BackupManager(manager.ThreadPoolManager):
         self.volume_managers['default'] = default

     def _init_volume_driver(self, ctxt, driver):
-        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
+        LOG.info("Starting volume driver %(driver_name)s (%(version)s).",
                  {'driver_name': driver.__class__.__name__,
                   'version': driver.get_version()})
         try:
             driver.do_setup(ctxt)
             driver.check_for_setup_error()
         except Exception:
-            LOG.exception(_LE("Error encountered during initialization of "
-                              "driver: %(name)s."),
+            LOG.exception("Error encountered during initialization of "
+                          "driver: %(name)s.",
                           {'name': driver.__class__.__name__})
             # we don't want to continue since we failed
             # to initialize the driver correctly.
@@ -213,8 +213,7 @@ class BackupManager(manager.ThreadPoolManager):
             self._cleanup_incomplete_backup_operations(ctxt)
         except Exception:
             # Don't block startup of the backup service.
-            LOG.exception(_LE("Problem cleaning incomplete backup "
-                              "operations."))
+            LOG.exception("Problem cleaning incomplete backup operations.")

     def reset(self):
         super(BackupManager, self).reset()
@@ -222,7 +221,7 @@ class BackupManager(manager.ThreadPoolManager):
         self.volume_rpcapi = volume_rpcapi.VolumeAPI()

     def _cleanup_incomplete_backup_operations(self, ctxt):
-        LOG.info(_LI("Cleaning up incomplete backup operations."))
+        LOG.info("Cleaning up incomplete backup operations.")

         # TODO(smulcahy) implement full resume of backup and restore
         # operations on restart (rather than simply resetting)
@@ -231,35 +230,35 @@ class BackupManager(manager.ThreadPoolManager):
             try:
                 self._cleanup_one_backup(ctxt, backup)
             except Exception:
-                LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
+                LOG.exception("Problem cleaning up backup %(bkup)s.",
                               {'bkup': backup['id']})
             try:
                 self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
                                                                     backup)
             except Exception:
-                LOG.exception(_LE("Problem cleaning temp volumes and "
-                                  "snapshots for backup %(bkup)s."),
+                LOG.exception("Problem cleaning temp volumes and "
+                              "snapshots for backup %(bkup)s.",
                               {'bkup': backup['id']})

     def _cleanup_one_volume(self, ctxt, volume):
         if volume['status'] == 'backing-up':
             self._detach_all_attachments(ctxt, volume)
-            LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-                         'status %(status)s (was backing-up).'),
+            LOG.info('Resetting volume %(vol_id)s to previous '
+                     'status %(status)s (was backing-up).',
                      {'vol_id': volume['id'],
                       'status': volume['previous_status']})
             self.db.volume_update(ctxt, volume['id'],
                                   {'status': volume['previous_status']})
         elif volume['status'] == 'restoring-backup':
             self._detach_all_attachments(ctxt, volume)
-            LOG.info(_LI('setting volume %s to error_restoring '
-                         '(was restoring-backup).'), volume['id'])
+            LOG.info('Setting volume %s to error_restoring '
+                     '(was restoring-backup).', volume['id'])
             self.db.volume_update(ctxt, volume['id'],
                                   {'status': 'error_restoring'})

     def _cleanup_one_backup(self, ctxt, backup):
         if backup['status'] == fields.BackupStatus.CREATING:
-            LOG.info(_LI('Resetting backup %s to error (was creating).'),
+            LOG.info('Resetting backup %s to error (was creating).',
                      backup['id'])

             volume = objects.Volume.get_by_id(ctxt, backup.volume_id)
@@ -268,8 +267,8 @@ class BackupManager(manager.ThreadPoolManager):
             err = 'incomplete backup reset on manager restart'
             self._update_backup_error(backup, err)
         elif backup['status'] == fields.BackupStatus.RESTORING:
-            LOG.info(_LI('Resetting backup %s to '
-                         'available (was restoring).'),
+            LOG.info('Resetting backup %s to '
+                     'available (was restoring).',
                      backup['id'])
             volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id)
             self._cleanup_one_volume(ctxt, volume)
@@ -277,7 +276,7 @@ class BackupManager(manager.ThreadPoolManager):
             backup.status = fields.BackupStatus.AVAILABLE
             backup.save()
         elif backup['status'] == fields.BackupStatus.DELETING:
-            LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
+            LOG.info('Resuming delete on backup: %s.', backup['id'])
             if CONF.backup_service_inithost_offload:
                 # Offload all the pending backup delete operations to the
                 # threadpool to prevent the main backup service thread
@@ -296,8 +295,7 @@ class BackupManager(manager.ThreadPoolManager):
                     rpcapi = self.volume_rpcapi
                     rpcapi.detach_volume(ctxt, volume, attachment['id'])
                 except Exception:
-                    LOG.exception(_LE("Detach attachment %(attach_id)s"
-                                      " failed."),
+                    LOG.exception("Detach attachment %(attach_id)s failed.",
                                   {'attach_id': attachment['id']},
                                   resource=volume)

@@ -359,8 +357,8 @@ class BackupManager(manager.ThreadPoolManager):
         volume_id = backup.volume_id
         volume = objects.Volume.get_by_id(context, volume_id)
         previous_status = volume.get('previous_status', None)
-        LOG.info(_LI('Create backup started, backup: %(backup_id)s '
-                     'volume: %(volume_id)s.'),
+        LOG.info('Create backup started, backup: %(backup_id)s '
+                 'volume: %(volume_id)s.',
                  {'backup_id': backup.id, 'volume_id': volume_id})

         self._notify_about_backup_usage(context, backup, "create.start")
@@ -417,7 +415,7 @@ class BackupManager(manager.ThreadPoolManager):
                                                         backup.parent_id)
             parent_backup.num_dependent_backups += 1
             parent_backup.save()
-        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
+        LOG.info('Create backup finished. backup: %s.', backup.id)
         self._notify_about_backup_usage(context, backup, "create.end")

     def _run_backup(self, context, backup, volume):
@@ -457,8 +455,8 @@ class BackupManager(manager.ThreadPoolManager):

     def restore_backup(self, context, backup, volume_id):
         """Restore volume backups from configured backup service."""
-        LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
-                     'volume: %(volume_id)s.'),
+        LOG.info('Restore backup started, backup: %(backup_id)s '
+                 'volume: %(volume_id)s.',
                  {'backup_id': backup.id, 'volume_id': volume_id})

         volume = objects.Volume.get_by_id(context, volume_id)
@@ -490,9 +488,9 @@ class BackupManager(manager.ThreadPoolManager):
             raise exception.InvalidBackup(reason=err)

         if volume['size'] > backup['size']:
-            LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
-                         'larger than backup: %(backup_id)s, '
-                         'size: %(backup_size)d, continuing with restore.'),
+            LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is '
+                     'larger than backup: %(backup_id)s, '
+                     'size: %(backup_size)d, continuing with restore.',
                      {'vol_id': volume['id'],
                       'vol_size': volume['size'],
                       'backup_id': backup['id'],
@@ -525,8 +523,8 @@ class BackupManager(manager.ThreadPoolManager):
         self.db.volume_update(context, volume_id, {'status': 'available'})
         backup.status = fields.BackupStatus.AVAILABLE
         backup.save()
-        LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
-                     ' to volume %(volume_id)s.'),
+        LOG.info('Restore backup finished, backup %(backup_id)s restored'
+                 ' to volume %(volume_id)s.',
                  {'backup_id': backup.id, 'volume_id': volume_id})
         self._notify_about_backup_usage(context, backup, "restore.end")

@@ -557,7 +555,7 @@ class BackupManager(manager.ThreadPoolManager):

     def delete_backup(self, context, backup):
         """Delete volume backup from configured backup service."""
-        LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
+        LOG.info('Delete backup started, backup: %s.', backup.id)

         self._notify_about_backup_usage(context, backup, "delete.start")
         backup.host = self.host
@@ -604,7 +602,7 @@ class BackupManager(manager.ThreadPoolManager):
                                         **reserve_opts)
         except Exception:
             reservations = None
-            LOG.exception(_LE("Failed to update usages deleting backup"))
+            LOG.exception("Failed to update usages deleting backup")

         backup.destroy()
         # If this backup is incremental backup, handle the
@@ -620,7 +618,7 @@ class BackupManager(manager.ThreadPoolManager):
             QUOTAS.commit(context, reservations,
                           project_id=backup.project_id)

-        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
+        LOG.info('Delete backup finished, backup %s deleted.', backup.id)
         self._notify_about_backup_usage(context, backup, "delete.end")

     def _notify_about_backup_usage(self,
@@ -646,7 +644,7 @@ class BackupManager(manager.ThreadPoolManager):
         :returns: 'backup_service' describing the needed driver.
         :raises: InvalidBackup
         """
-        LOG.info(_LI('Export record started, backup: %s.'), backup.id)
+        LOG.info('Export record started, backup: %s.', backup.id)

         expected_status = fields.BackupStatus.AVAILABLE
         actual_status = backup.status
@@ -680,7 +678,7 @@ class BackupManager(manager.ThreadPoolManager):
             msg = six.text_type(err)
             raise exception.InvalidBackup(reason=msg)

-        LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
+        LOG.info('Export record finished, backup %s exported.', backup.id)
         return backup_record

     def import_record(self,
@@ -699,7 +697,7 @@ class BackupManager(manager.ThreadPoolManager):
         :raises: InvalidBackup
         :raises: ServiceNotFound
         """
-        LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
+        LOG.info('Import record started, backup_url: %s.', backup_url)

         # Can we import this backup?
         if (backup_service != self.driver_name):
@@ -783,9 +781,9 @@ class BackupManager(manager.ThreadPoolManager):
                 if isinstance(backup_service, driver.BackupDriverWithVerify):
                     backup_service.verify(backup.id)
                 else:
-                    LOG.warning(_LW('Backup service %(service)s does not '
-                                    'support verify. Backup id %(id)s is '
-                                    'not verified. Skipping verify.'),
+                    LOG.warning('Backup service %(service)s does not '
+                                'support verify. Backup id %(id)s is '
+                                'not verified. Skipping verify.',
                                 {'service': self.driver_name,
                                  'id': backup.id})
             except exception.InvalidBackup as err:
@@ -796,8 +794,8 @@ class BackupManager(manager.ThreadPoolManager):
         backup.update({"status": fields.BackupStatus.AVAILABLE})
         backup.save()

-        LOG.info(_LI('Import record id %s metadata from driver '
-                     'finished.'), backup.id)
+        LOG.info('Import record id %s metadata from driver '
+                 'finished.', backup.id)

     def reset_status(self, context, backup, status):
         """Reset volume backup status.
@@ -809,13 +807,13 @@ class BackupManager(manager.ThreadPoolManager):
        :raises: BackupVerifyUnsupportedDriver
        :raises: AttributeError
        """
-        LOG.info(_LI('Reset backup status started, backup_id: '
-                     '%(backup_id)s, status: %(status)s.'),
+        LOG.info('Reset backup status started, backup_id: '
+                 '%(backup_id)s, status: %(status)s.',
                  {'backup_id': backup.id,
                   'status': status})

        backup_service_name = self._map_service_to_driver(backup.service)
-        LOG.info(_LI('Backup service: %s.'), backup_service_name)
+        LOG.info('Backup service: %s.', backup_service_name)
        if backup_service_name is not None:
            configured_service = self.driver_name
            if backup_service_name != configured_service:
@@ -857,14 +855,14 @@ class BackupManager(manager.ThreadPoolManager):
             backup.save()
         except exception.InvalidBackup:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Backup id %s is not invalid. "
-                              "Skipping reset."), backup.id)
+                LOG.error("Backup id %s is not invalid. Skipping reset.",
+                          backup.id)
         except exception.BackupVerifyUnsupportedDriver:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Backup service %(configured_service)s '
-                              'does not support verify. Backup id '
-                              '%(id)s is not verified. '
-                              'Skipping verify.'),
+                LOG.error('Backup service %(configured_service)s '
+                          'does not support verify. Backup id '
+                          '%(id)s is not verified. '
+                          'Skipping verify.',
                           {'configured_service': self.driver_name,
                            'id': backup.id})
         except AttributeError:
@@ -882,8 +880,8 @@ class BackupManager(manager.ThreadPoolManager):
             self._cleanup_temp_volumes_snapshots_for_one_backup(
                 context, backup)
         except Exception:
-            LOG.exception(_LE("Problem cleaning temp volumes and "
-                              "snapshots for backup %(bkup)s."),
+            LOG.exception("Problem cleaning temp volumes and "
+                          "snapshots for backup %(bkup)s.",
                           {'bkup': backup.id})

         # send notification to ceilometer
@@ -928,9 +926,9 @@ class BackupManager(manager.ThreadPoolManager):
                                                  properties,
                                                  force=True)
             except Exception:
-                LOG.warning(_LW("Failed to terminate the connection "
-                                "of volume %(volume_id)s, but it is "
-                                "acceptable."),
+                LOG.warning("Failed to terminate the connection "
+                            "of volume %(volume_id)s, but it is "
+                            "acceptable.",
                             {'volume_id', volume.id})

    def _connect_device(self, conn):
@@ -28,7 +28,6 @@ from oslo_utils import excutils
 from six import moves

 from cinder import exception
-from cinder.i18n import _LE, _LI
 from cinder import utils


@@ -97,14 +96,14 @@ class LVM(executor.Executor):
             try:
                 self._create_vg(physical_volumes)
             except putils.ProcessExecutionError as err:
-                LOG.exception(_LE('Error creating Volume Group'))
-                LOG.error(_LE('Cmd :%s'), err.cmd)
-                LOG.error(_LE('StdOut :%s'), err.stdout)
-                LOG.error(_LE('StdErr :%s'), err.stderr)
+                LOG.exception('Error creating Volume Group')
+                LOG.error('Cmd :%s', err.cmd)
+                LOG.error('StdOut :%s', err.stdout)
+                LOG.error('StdErr :%s', err.stderr)
                 raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)

         if self._vg_exists() is False:
-            LOG.error(_LE('Unable to locate Volume Group %s'), vg_name)
+            LOG.error('Unable to locate Volume Group %s', vg_name)
             raise exception.VolumeGroupNotFound(vg_name=vg_name)

         # NOTE: we assume that the VG has been activated outside of Cinder
@@ -180,10 +179,10 @@ class LVM(executor.Executor):
             free_space = pool_size - consumed_space
             free_space = round(free_space, 2)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error querying thin pool about data_percent'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
+            LOG.exception('Error querying thin pool about data_percent')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)

         return free_space

@@ -300,8 +299,8 @@ class LVM(executor.Executor):
             with excutils.save_and_reraise_exception(reraise=True) as ctx:
                 if "not found" in err.stderr or "Failed to find" in err.stderr:
                     ctx.reraise = False
-                    LOG.info(_LI("Logical Volume not found when querying "
-                                 "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"),
+                    LOG.info("Logical Volume not found when querying "
+                             "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s",
                              {'vg': vg_name, 'lv': lv_name})
                     out = None

@@ -416,7 +415,7 @@ class LVM(executor.Executor):
         vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)

         if len(vg_list) != 1:
-            LOG.error(_LE('Unable to find VG: %s'), self.vg_name)
+            LOG.error('Unable to find VG: %s', self.vg_name)
             raise exception.VolumeGroupNotFound(vg_name=self.vg_name)

         self.vg_size = float(vg_list[0]['size'])
@@ -503,9 +502,9 @@ class LVM(executor.Executor):
         """

         if not self.supports_thin_provisioning(self._root_helper):
-            LOG.error(_LE('Requested to setup thin provisioning, '
-                          'however current LVM version does not '
-                          'support it.'))
+            LOG.error('Requested to setup thin provisioning, '
+                      'however current LVM version does not '
+                      'support it.')
             return None

         if name is None:
@@ -563,11 +562,11 @@ class LVM(executor.Executor):
                           root_helper=self._root_helper,
                           run_as_root=True)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error creating Volume'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
-            LOG.error(_LE('Current state: %s'), self.get_all_volume_groups())
+            LOG.exception('Error creating Volume')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
+            LOG.error('Current state: %s', self.get_all_volume_groups())
             raise

     @utils.retry(putils.ProcessExecutionError)
@@ -581,7 +580,7 @@ class LVM(executor.Executor):
         """
         source_lvref = self.get_volume(source_lv_name)
         if source_lvref is None:
-            LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
+            LOG.error("Trying to create snapshot by non-existent LV: %s",
                       source_lv_name)
             raise exception.VolumeDeviceNotFound(device=source_lv_name)
         cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
@@ -595,10 +594,10 @@ class LVM(executor.Executor):
                           root_helper=self._root_helper,
                           run_as_root=True)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error creating snapshot'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
+            LOG.exception('Error creating snapshot')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
             raise

     def _mangle_lv_name(self, name):
@@ -629,10 +628,10 @@ class LVM(executor.Executor):
                           root_helper=self._root_helper,
                           run_as_root=True)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error deactivating LV'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
+            LOG.exception('Error deactivating LV')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
             raise

         # Wait until lv is deactivated to return in
@@ -686,10 +685,10 @@ class LVM(executor.Executor):
                           root_helper=self._root_helper,
                           run_as_root=True)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error activating LV'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
+            LOG.exception('Error activating LV')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
             raise

     @utils.retry(putils.ProcessExecutionError)
@@ -813,10 +812,10 @@ class LVM(executor.Executor):
             self._execute(*cmd, root_helper=self._root_helper,
                           run_as_root=True)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error extending Volume'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
+            LOG.exception('Error extending Volume')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
             raise

     def vg_mirror_free_space(self, mirror_count):
@@ -851,8 +850,8 @@ class LVM(executor.Executor):
                           root_helper=self._root_helper,
                           run_as_root=True)
         except putils.ProcessExecutionError as err:
-            LOG.exception(_LE('Error renaming logical volume'))
-            LOG.error(_LE('Cmd :%s'), err.cmd)
-            LOG.error(_LE('StdOut :%s'), err.stdout)
-            LOG.error(_LE('StdErr :%s'), err.stderr)
+            LOG.exception('Error renaming logical volume')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
             raise
@@ -46,7 +46,7 @@ i18n.enable_lazy()
 # Need to register global_opts
 from cinder.common import config  # noqa
 from cinder.db import api as session
-from cinder.i18n import _, _LE
+from cinder.i18n import _
 from cinder import service
 from cinder import utils
 from cinder import version
@@ -109,9 +109,9 @@ def main():
             launcher.launch_service(server)
             service_started = True
     else:
-        LOG.error(_LE('Configuration for cinder-volume does not specify '
-                      '"enabled_backends". Using DEFAULT section to configure '
-                      'drivers is not supported since Ocata.'))
+        LOG.error('Configuration for cinder-volume does not specify '
+                  '"enabled_backends". Using DEFAULT section to configure '
+                  'drivers is not supported since Ocata.')

     if not service_started:
         msg = _('No volume service(s) started successfully, terminating.')
@@ -48,7 +48,7 @@ from oslo_log import log as logging
 from cinder import i18n
 i18n.enable_lazy()
 from cinder import context
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder import rpc
 from cinder import utils
@@ -104,7 +104,7 @@ def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
         cinder.volume.utils.notify_about_volume_usage(
             admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
     except Exception as exc_msg:
-        LOG.error(_LE("Exists volume notification failed: %s"),
+        LOG.error("Exists volume notification failed: %s",
                   exc_msg, resource=volume_ref)


@@ -119,7 +119,7 @@ def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
         cinder.volume.utils.notify_about_snapshot_usage(
             admin_context, snapshot_ref, 'exists', extra_info)
     except Exception as exc_msg:
-        LOG.error(_LE("Exists snapshot notification failed: %s"),
+        LOG.error("Exists snapshot notification failed: %s",
                   exc_msg, resource=snapshot_ref)


@@ -134,7 +134,7 @@ def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
              'project_id': backup_ref.project_id,
              'extra_info': extra_info})
     except Exception as exc_msg:
-        LOG.error(_LE("Exists backups notification failed: %s"), exc_msg)
+        LOG.error("Exists backups notification failed: %s", exc_msg)


 def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
@@ -155,7 +155,7 @@ def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
         notify_about_usage(admin_context, obj_ref,
                            'create.end', extra_usage_info=local_extra_info)
     except Exception as exc_msg:
-        LOG.error(_LE("Create %(type)s notification failed: %(exc_msg)s"),
+        LOG.error("Create %(type)s notification failed: %(exc_msg)s",
                   {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)


@@ -177,7 +177,7 @@ def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
         notify_about_usage(admin_context, obj_ref,
                            'delete.end', extra_usage_info=local_extra_info)
     except Exception as exc_msg:
-        LOG.error(_LE("Delete %(type)s notification failed: %(exc_msg)s"),
+        LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
                   {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)


@@ -206,9 +206,9 @@ def main():
     begin, end = utils.last_completed_audit_period()
     begin, end = _time_error(LOG, begin, end)

-    LOG.info(_LI("Starting volume usage audit"))
-    msg = _LI("Creating usages for %(begin_period)s until %(end_period)s")
-    LOG.info(msg, {"begin_period": str(begin), "end_period": str(end)})
+    LOG.info("Starting volume usage audit")
+    LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
+             {"begin_period": begin, "end_period": end})

     extra_info = {
         'audit_period_beginning': str(begin),
@@ -219,7 +219,7 @@ def main():
                                                          begin,
                                                          end)

-    LOG.info(_LI("Found %d volumes"), len(volumes))
+    LOG.info("Found %d volumes", len(volumes))
     for volume_ref in volumes:
         _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
                         admin_context, begin, end,
@@ -228,7 +228,7 @@ def main():

     snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
                                                               begin, end)
-    LOG.info(_LI("Found %d snapshots"), len(snapshots))
+    LOG.info("Found %d snapshots", len(snapshots))
     for snapshot_ref in snapshots:
         _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
                         admin_context, begin,
@@ -238,10 +238,10 @@ def main():
     backups = objects.BackupList.get_all_active_by_window(admin_context,
                                                           begin, end)

-    LOG.info(_LI("Found %d backups"), len(backups))
+    LOG.info("Found %d backups", len(backups))
     for backup_ref in backups:
         _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
                         admin_context, begin,
                         end, cinder.volume.utils.notify_about_backup_usage,
                         "backup_id", "backup")
-    LOG.info(_LI("Volume usage audit completed"))
+    LOG.info("Volume usage audit completed")
@@ -27,7 +27,7 @@ from sqlalchemy.sql import type_api

 from cinder.db import api
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _


 LOG = logging.getLogger(__name__)
@@ -96,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     if 'id' not in sort_keys:
         # TODO(justinsb): If this ever gives a false-positive, check
         # the actual primary key, rather than assuming its id
-        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warning('Id not in sort_keys; is sort_keys unique?')

     assert(not (sort_dir and sort_dirs))

@@ -28,7 +28,7 @@ from oslo_utils import timeutils
 from cinder import db
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields as c_fields
 import cinder.policy
@@ -110,8 +110,7 @@ class API(base.Base):

         valid = self._valid_availability_zone(availability_zone)
         if not valid:
-            msg = _LW(
-                "Availability zone '%s' is invalid") % (availability_zone)
+            msg = _("Availability zone '%s' is invalid.") % availability_zone
             LOG.warning(msg)
             raise exception.InvalidInput(reason=msg)

@@ -148,8 +147,8 @@ class API(base.Base):
             group.create()
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error occurred when creating consistency group"
-                              " %s."), name)
+                LOG.error("Error occurred when creating consistency group "
+                          "%s.", name)

         request_spec_list = []
         filter_properties_list = []
@@ -189,19 +188,19 @@ class API(base.Base):
             group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
         except exception.ConsistencyGroupNotFound:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Source CG %(source_cg)s not found when "
-                              "creating consistency group %(cg)s from "
-                              "source."),
+                LOG.error("Source CG %(source_cg)s not found when "
+                          "creating consistency group %(cg)s from "
+                          "source.",
                           {'cg': name, 'source_cg': source_cgid})
         except exception.CgSnapshotNotFound:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("CG snapshot %(cgsnap)s not found when creating "
-                              "consistency group %(cg)s from source."),
+                LOG.error("CG snapshot %(cgsnap)s not found when creating "
+                          "consistency group %(cg)s from source.",
                           {'cg': name, 'cgsnap': cgsnapshot_id})
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error occurred when creating consistency group"
-                              " %(cg)s from cgsnapshot %(cgsnap)s."),
+                LOG.error("Error occurred when creating consistency group"
+                          " %(cg)s from cgsnapshot %(cgsnap)s.",
                           {'cg': name, 'cgsnap': cgsnapshot_id})

         # Update quota for consistencygroups
@@ -257,10 +256,10 @@ class API(base.Base):
                                              **kwargs)
             except exception.CinderException:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Error occurred when creating volume "
-                                  "entry from snapshot in the process of "
-                                  "creating consistency group %(group)s "
-                                  "from cgsnapshot %(cgsnap)s."),
+                    LOG.error("Error occurred when creating volume "
+                              "entry from snapshot in the process of "
+                              "creating consistency group %(group)s "
+                              "from cgsnapshot %(cgsnap)s.",
                               {'group': group.id,
                                'cgsnap': cgsnapshot.id})
         except Exception:
@@ -268,9 +267,9 @@ class API(base.Base):
             try:
                 group.destroy()
             finally:
-                LOG.error(_LE("Error occurred when creating consistency "
-                              "group %(group)s from cgsnapshot "
-                              "%(cgsnap)s."),
+                LOG.error("Error occurred when creating consistency "
+                          "group %(group)s from cgsnapshot "
+                          "%(cgsnap)s.",
                           {'group': group.id,
                            'cgsnap': cgsnapshot.id})

@@ -321,10 +320,10 @@ class API(base.Base):
                                              **kwargs)
             except exception.CinderException:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Error occurred when creating cloned "
-                                  "volume in the process of creating "
-                                  "consistency group %(group)s from "
-                                  "source CG %(source_cg)s."),
+                    LOG.error("Error occurred when creating cloned "
+                              "volume in the process of creating "
+                              "consistency group %(group)s from "
+                              "source CG %(source_cg)s.",
                               {'group': group.id,
                                'source_cg': source_cg.id})
         except Exception:
@@ -332,9 +331,9 @@ class API(base.Base):
             try:
                 group.destroy()
             finally:
-                LOG.error(_LE("Error occurred when creating consistency "
-                              "group %(group)s from source CG "
-                              "%(source_cg)s."),
+                LOG.error("Error occurred when creating consistency "
+                          "group %(group)s from source CG "
+                          "%(source_cg)s.",
                           {'group': group.id,
                            'source_cg': source_cg.id})

@@ -390,9 +389,9 @@ class API(base.Base):
             try:
                 group.destroy()
             finally:
-                LOG.error(_LE("Error occurred when building "
-                              "request spec list for consistency group "
-                              "%s."), group.id)
+                LOG.error("Error occurred when building "
+                          "request spec list for consistency group "
+                          "%s.", group.id)

         # Cast to the scheduler and let it handle whatever is needed
         # to select the target host for this group.
@@ -418,8 +417,8 @@ class API(base.Base):
                 quota_utils.process_reserve_over_quota(
                     context, e, resource='groups')
             finally:
-                LOG.error(_LE("Failed to update quota for "
-                              "consistency group %s."), group.id)
+                LOG.error("Failed to update quota for "
+                          "consistency group %s.", group.id)

     @wrap_check_policy
     def delete(self, context, group, force=False):
@@ -749,8 +748,8 @@ class API(base.Base):
                 if cgsnapshot.obj_attr_is_set('id'):
                     cgsnapshot.destroy()
             finally:
-                LOG.error(_LE("Error occurred when creating cgsnapshot"
-                              " %s."), cgsnapshot_id)
+                LOG.error("Error occurred when creating cgsnapshot"
+                          " %s.", cgsnapshot_id)

         self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot)

@@ -25,7 +25,7 @@ from oslo_log import log as logging
 from oslo_utils import timeutils
 import six

-from cinder.i18n import _, _LW
+from cinder.i18n import _
 from cinder import policy

 context_opts = [
@@ -214,6 +214,6 @@ def get_internal_tenant_context():
                                       project_id=project_id,
                                       is_admin=True)
     else:
-        LOG.warning(_LW('Unable to get internal tenant context: Missing '
-                        'required config parameters.'))
+        LOG.warning('Unable to get internal tenant context: Missing '
+                    'required config parameters.')
         return None
|
@ -32,7 +32,7 @@ from tooz import coordination
|
|||||||
from tooz import locking
|
from tooz import locking
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI, _LW
|
from cinder.i18n import _
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
@ -94,9 +94,9 @@ class Coordinator(object):
|
|||||||
self._ev = eventlet.spawn(
|
self._ev = eventlet.spawn(
|
||||||
lambda: tpool.execute(self.heartbeat))
|
lambda: tpool.execute(self.heartbeat))
|
||||||
except coordination.ToozError:
|
except coordination.ToozError:
|
||||||
LOG.exception(_LE('Error starting coordination backend.'))
|
LOG.exception('Error starting coordination backend.')
|
||||||
raise
|
raise
|
||||||
LOG.info(_LI('Coordination backend started successfully.'))
|
LOG.info('Coordination backend started successfully.')
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
"""Disconnect from coordination backend and stop heartbeat."""
|
"""Disconnect from coordination backend and stop heartbeat."""
|
||||||
@ -154,17 +154,17 @@ class Coordinator(object):
|
|||||||
self.coordinator.heartbeat()
|
self.coordinator.heartbeat()
|
||||||
return True
|
return True
|
||||||
except coordination.ToozConnectionError:
|
except coordination.ToozConnectionError:
|
||||||
LOG.exception(_LE('Connection error while sending a heartbeat '
|
LOG.exception('Connection error while sending a heartbeat '
|
||||||
'to coordination backend.'))
|
'to coordination backend.')
|
||||||
raise
|
raise
|
||||||
except coordination.ToozError:
|
except coordination.ToozError:
|
||||||
LOG.exception(_LE('Error sending a heartbeat to coordination '
|
LOG.exception('Error sending a heartbeat to coordination '
|
||||||
'backend.'))
|
'backend.')
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def _reconnect(self):
|
def _reconnect(self):
|
||||||
"""Reconnect with jittered exponential backoff increase."""
|
"""Reconnect with jittered exponential backoff increase."""
|
||||||
LOG.info(_LI('Reconnecting to coordination backend.'))
|
LOG.info('Reconnecting to coordination backend.')
|
||||||
cap = cfg.CONF.coordination.max_reconnect_backoff
|
cap = cfg.CONF.coordination.max_reconnect_backoff
|
||||||
backoff = base = cfg.CONF.coordination.initial_reconnect_backoff
|
backoff = base = cfg.CONF.coordination.initial_reconnect_backoff
|
||||||
for attempt in itertools.count(1):
|
for attempt in itertools.count(1):
|
||||||
@ -173,11 +173,11 @@ class Coordinator(object):
|
|||||||
break
|
break
|
||||||
except coordination.ToozError:
|
except coordination.ToozError:
|
||||||
backoff = min(cap, random.uniform(base, backoff * 3))
|
backoff = min(cap, random.uniform(base, backoff * 3))
|
||||||
msg = _LW('Reconnect attempt %(attempt)s failed. '
|
msg = ('Reconnect attempt %(attempt)s failed. '
|
||||||
'Next try in %(backoff).2fs.')
|
'Next try in %(backoff).2fs.')
|
||||||
LOG.warning(msg, {'attempt': attempt, 'backoff': backoff})
|
LOG.warning(msg, {'attempt': attempt, 'backoff': backoff})
|
||||||
self._dead.wait(backoff)
|
self._dead.wait(backoff)
|
||||||
LOG.info(_LI('Reconnected to coordination backend.'))
|
LOG.info('Reconnected to coordination backend.')
|
||||||
|
|
||||||
|
|
||||||
COORDINATOR = Coordinator(prefix='cinder-')
|
COORDINATOR = Coordinator(prefix='cinder-')
|
||||||
|
@@ -57,7 +57,7 @@ from cinder.common import sqlalchemyutils
 from cinder import db
 from cinder.db.sqlalchemy import models
 from cinder import exception
-from cinder.i18n import _, _LW, _LE, _LI
+from cinder.i18n import _
 from cinder.objects import fields
 from cinder import utils

@@ -120,7 +120,7 @@ def get_backend():
 def is_admin_context(context):
     """Indicates if the request context is an administrator."""
     if not context:
-        LOG.warning(_LW('Use of empty request context is deprecated'),
+        LOG.warning('Use of empty request context is deprecated',
                     DeprecationWarning)
         raise Exception('die')
     return context.is_admin
@@ -234,8 +234,8 @@ def _retry_on_deadlock(f):
             try:
                 return f(*args, **kwargs)
             except db_exc.DBDeadlock:
-                LOG.warning(_LW("Deadlock detected when running "
-                                "'%(func_name)s': Retrying..."),
+                LOG.warning("Deadlock detected when running "
+                            "'%(func_name)s': Retrying...",
                             dict(func_name=f.__name__))
                 # Retry!
                 time.sleep(0.5)
@@ -1277,8 +1277,8 @@ def quota_reserve(context, resources, quotas, deltas, expire,
                 usages[resource].reserved += delta

     if unders:
-        LOG.warning(_LW("Change will make usage less than 0 for the following "
-                        "resources: %s"), unders)
+        LOG.warning("Change will make usage less than 0 for the following "
+                    "resources: %s", unders)
     if overs:
         usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
                           allocated=allocated.get(k, 0))
@@ -3898,8 +3898,7 @@ def volume_type_destroy(context, id):
                               session=session).filter(
         models.ConsistencyGroup.volume_type_id.contains(id)).count()
     if results or group_count or cg_count:
-        LOG.error(_LE('VolumeType %s deletion failed, '
-                      'VolumeType in use.'), id)
+        LOG.error('VolumeType %s deletion failed, VolumeType in use.', id)
         raise exception.VolumeTypeInUse(volume_type_id=id)
     updated_values = {'deleted': True,
                       'deleted_at': utcnow,
@@ -3929,8 +3928,8 @@ def group_type_destroy(context, id):
     # results = model_query(context, models.Group, session=session). \
     #              filter_by(group_type_id=id).all()
     # if results:
-    #     LOG.error(_LE('GroupType %s deletion failed, '
-    #                   'GroupType in use.'), id)
+    #     LOG.error('GroupType %s deletion failed, '
+    #               'GroupType in use.', id)
     #     raise exception.GroupTypeInUse(group_type_id=id)
     model_query(context, models.GroupTypes, session=session).\
         filter_by(id=id).\
@@ -6086,9 +6085,9 @@ def purge_deleted_rows(context, age_in_days):
     for table in reversed(metadata.sorted_tables):
         if 'deleted' not in table.columns.keys():
             continue
-        LOG.info(_LI('Purging deleted rows older than age=%(age)d days '
-                     'from table=%(table)s'), {'age': age_in_days,
-                                               'table': table})
+        LOG.info('Purging deleted rows older than age=%(age)d days '
+                 'from table=%(table)s', {'age': age_in_days,
+                                          'table': table})
         deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
         try:
             with session.begin():
@@ -6104,14 +6103,14 @@ def purge_deleted_rows(context, age_in_days):
                     table.delete()
                     .where(table.c.deleted_at < deleted_age))
         except db_exc.DBReferenceError as ex:
-            LOG.error(_LE('DBError detected when purging from '
-                          '%(tablename)s: %(error)s.'),
-                      {'tablename': table, 'error': six.text_type(ex)})
+            LOG.error('DBError detected when purging from '
+                      '%(tablename)s: %(error)s.',
+                      {'tablename': table, 'error': ex})
             raise

         rows_purged = result.rowcount
         if rows_purged != 0:
-            LOG.info(_LI("Deleted %(row)d rows from table=%(table)s"),
+            LOG.info("Deleted %(row)d rows from table=%(table)s",
                      {'row': rows_purged, 'table': table})


@@ -32,7 +32,7 @@ import webob.exc
 from webob.util import status_generic_reasons
 from webob.util import status_reasons

-from cinder.i18n import _, _LE
+from cinder.i18n import _


 LOG = logging.getLogger(__name__)

@@ -108,9 +108,9 @@ class CinderException(Exception):
 exc_info = sys.exc_info()
 # kwargs doesn't match a variable in the message
 # log the issue and the kwargs
-LOG.exception(_LE('Exception in string format operation'))
+LOG.exception('Exception in string format operation')
 for name, value in kwargs.items():
-LOG.error(_LE("%(name)s: %(value)s"),
+LOG.error("%(name)s: %(value)s",
 {'name': name, 'value': value})
 if CONF.fatal_exception_format_errors:
 six.reraise(*exc_info)
@@ -29,7 +29,7 @@ from oslo_utils import uuidutils
 from cinder import db
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import base as objects_base
 from cinder.objects import fields as c_fields

@@ -117,9 +117,8 @@ class API(base.Base):
 availability_zone = (
 CONF.default_availability_zone or
 CONF.storage_availability_zone)
-LOG.warning(_LW("Availability zone '%(s_az)s' "
-"not found, falling back to "
-"'%(s_fallback_az)s'."),
+LOG.warning("Availability zone '%(s_az)s' not found, falling "
+"back to '%(s_fallback_az)s'.",
 {'s_az': original_az,
 's_fallback_az': availability_zone})
 else:

@@ -159,8 +158,8 @@ class API(base.Base):
 group.create()
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Error occurred when creating group"
-" %s."), name)
+LOG.error("Error occurred when creating group"
+" %s.", name)

 request_spec_list = []
 filter_properties_list = []

@@ -222,19 +221,18 @@ class API(base.Base):
 source_group_id=source_group_id)
 except exception.GroupNotFound:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Source Group %(source_group)s not found when "
-"creating group %(group)s from "
-"source."),
+LOG.error("Source Group %(source_group)s not found when "
+"creating group %(group)s from source.",
 {'group': name, 'source_group': source_group_id})
 except exception.GroupSnapshotNotFound:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Group snapshot %(group_snap)s not found when "
-"creating group %(group)s from source."),
+LOG.error("Group snapshot %(group_snap)s not found when "
+"creating group %(group)s from source.",
 {'group': name, 'group_snap': group_snapshot_id})
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Error occurred when creating group"
-" %(group)s from group_snapshot %(grp_snap)s."),
+LOG.error("Error occurred when creating group"
+" %(group)s from group_snapshot %(grp_snap)s.",
 {'group': name, 'grp_snap': group_snapshot_id})

 # Update quota for groups

@@ -286,9 +284,9 @@ class API(base.Base):
 except exception.GroupVolumeTypeMappingExists:
 # Only need to create one group volume_type mapping
 # entry for the same combination, skipping.
-LOG.info(_LI("A mapping entry already exists for group"
+LOG.info("A mapping entry already exists for group"
 " %(grp)s and volume type %(vol_type)s. "
-"Do not need to create again."),
+"Do not need to create again.",
 {'grp': group.id,
 'vol_type': volume_type_id})
 pass

@@ -306,10 +304,10 @@ class API(base.Base):
 **kwargs)
 except exception.CinderException:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Error occurred when creating volume "
+LOG.error("Error occurred when creating volume "
 "entry from snapshot in the process of "
 "creating group %(group)s "
-"from group snapshot %(group_snap)s."),
+"from group snapshot %(group_snap)s.",
 {'group': group.id,
 'group_snap': group_snapshot.id})
 except Exception:

@@ -317,9 +315,8 @@ class API(base.Base):
 try:
 group.destroy()
 finally:
-LOG.error(_LE("Error occurred when creating group "
-"%(group)s from group snapshot "
-"%(group_snap)s."),
+LOG.error("Error occurred when creating group "
+"%(group)s from group snapshot %(group_snap)s.",
 {'group': group.id,
 'group_snap': group_snapshot.id})

@@ -364,9 +361,9 @@ class API(base.Base):
 except exception.GroupVolumeTypeMappingExists:
 # Only need to create one group volume_type mapping
 # entry for the same combination, skipping.
-LOG.info(_LI("A mapping entry already exists for group"
+LOG.info("A mapping entry already exists for group"
 " %(grp)s and volume type %(vol_type)s. "
-"Do not need to create again."),
+"Do not need to create again.",
 {'grp': group.id,
 'vol_type': volume_type_id})
 pass

@@ -384,10 +381,10 @@ class API(base.Base):
 **kwargs)
 except exception.CinderException:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Error occurred when creating cloned "
+LOG.error("Error occurred when creating cloned "
 "volume in the process of creating "
 "group %(group)s from "
-"source group %(source_group)s."),
+"source group %(source_group)s.",
 {'group': group.id,
 'source_group': source_group.id})
 except Exception:

@@ -395,9 +392,9 @@ class API(base.Base):
 try:
 group.destroy()
 finally:
-LOG.error(_LE("Error occurred when creating "
+LOG.error("Error occurred when creating "
 "group %(group)s from source group "
-"%(source_group)s."),
+"%(source_group)s.",
 {'group': group.id,
 'source_group': source_group.id})

@@ -467,9 +464,8 @@ class API(base.Base):
 try:
 group.destroy()
 finally:
-LOG.error(_LE("Error occurred when building "
-"request spec list for group "
-"%s."), group.id)
+LOG.error("Error occurred when building request spec "
+"list for group %s.", group.id)

 # Cast to the scheduler and let it handle whatever is needed
 # to select the target host for this group.

@@ -497,8 +493,7 @@ class API(base.Base):
 quota_utils.process_reserve_over_quota(
 context, e, resource='groups')
 finally:
-LOG.error(_LE("Failed to update quota for "
-"group %s."), group.id)
+LOG.error("Failed to update quota for group %s.", group.id)

 @wrap_check_policy
 def delete(self, context, group, delete_volumes=False):

@@ -823,8 +818,8 @@ class API(base.Base):
 if group_snapshot.obj_attr_is_set('id'):
 group_snapshot.destroy()
 finally:
-LOG.error(_LE("Error occurred when creating group_snapshot"
-" %s."), group_snapshot_id)
+LOG.error("Error occurred when creating group_snapshot"
+" %s.", group_snapshot_id)

 self.volume_rpcapi.create_group_snapshot(context, group_snapshot)
@@ -27,16 +27,6 @@ _translators = i18n.TranslatorFactory(domain=DOMAIN)
 # The primary translation function using the well-known name "_"
 _ = _translators.primary

-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical


 def enable_lazy(enable=True):
 return i18n.enable_lazy(enable)
@@ -19,7 +19,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import timeutils

-from cinder.i18n import _LW
 from cinder import objects
 from cinder import rpc
 from cinder import utils

@@ -176,8 +175,8 @@ class ImageVolumeCache(object):
 # to 0.
 if self.max_cache_size_gb > 0:
 if current_size > self.max_cache_size_gb > 0:
-LOG.warning(_LW('Image-volume cache for %(service)s does '
-'not have enough space (GB).'),
+LOG.warning('Image-volume cache for %(service)s does '
+'not have enough space (GB).',
 {'service': volume.service_topic_queue})
 return False
@@ -36,7 +36,7 @@ from six.moves import range
 from six.moves import urllib

 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _


 glance_opts = [

@@ -198,9 +198,9 @@ class GlanceClientWrapper(object):
 except retry_excs as e:
 netloc = self.netloc
 extra = "retrying"
-error_msg = _LE("Error contacting glance server "
+error_msg = _("Error contacting glance server "
 "'%(netloc)s' for '%(method)s', "
 "%(extra)s.")
 if attempt == num_attempts:
 extra = 'done trying'
 LOG.exception(error_msg, {'netloc': netloc,
@@ -42,7 +42,7 @@ from oslo_utils import units
 import psutil

 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import utils
 from cinder.volume import throttling
 from cinder.volume import utils as volume_utils

@@ -88,7 +88,7 @@ def get_qemu_img_version():
 pattern = r"qemu-img version ([0-9\.]*)"
 version = re.match(pattern, info)
 if not version:
-LOG.warning(_LW("qemu-img is not installed."))
+LOG.warning("qemu-img is not installed.")
 return None
 return _get_version_from_string(version.groups()[0])

@@ -149,8 +149,8 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True):
 image_size = qemu_img_info(source,
 run_as_root=run_as_root).virtual_size
 except ValueError as e:
-msg = _LI("The image was successfully converted, but image size "
+msg = ("The image was successfully converted, but image size "
 "is unavailable. src %(src)s, dest %(dest)s. %(error)s")
 LOG.info(msg, {"src": source,
 "dest": dest,
 "error": e})

@@ -165,7 +165,7 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True):
 "duration": duration,
 "dest": dest})

-msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s")
+msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s"
 LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})

@@ -198,9 +198,9 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id):
 with excutils.save_and_reraise_exception():
 if e.errno == errno.ENOSPC:
 # TODO(eharney): Fire an async error message for this
-LOG.error(_LE("No space left in image_conversion_dir "
+LOG.error("No space left in image_conversion_dir "
 "path (%(path)s) while fetching "
-"image %(image)s."),
+"image %(image)s.",
 {'path': os.path.dirname(path),
 'image': image_id})

@@ -217,7 +217,7 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id):
 LOG.debug(msg, {"dest": image_file.name,
 "sz": fsz_mb,
 "duration": duration})
-msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
+msg = "Image download %(sz).2f MB at %(mbps).2f MB/s"
 LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})

@@ -530,8 +530,8 @@ def cleanup_temporary_file(backend_name):
 path = os.path.join(temp_dir, tmp_file)
 os.remove(path)
 except OSError as e:
-LOG.warning(_LW("Exception caught while clearing temporary image "
-"files: %s"), e)
+LOG.warning("Exception caught while clearing temporary image "
+"files: %s", e)

 @contextlib.contextmanager
@@ -19,8 +19,6 @@ from oslo_log import log as logging
 from oslo_log import versionutils
 from oslo_utils import importutils

-from cinder.i18n import _LW

 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF

@@ -46,13 +44,13 @@ def set_overrides(conf):
 try:
 api_class = conf.key_manager.api_class
 except cfg.NoSuchOptError:
-LOG.warning(_LW("key_manager.api_class is not set, will use deprecated"
-" option keymgr.api_class if set"))
+LOG.warning("key_manager.api_class is not set, will use deprecated"
+" option keymgr.api_class if set")
 try:
 api_class = CONF.keymgr.api_class
 should_override = True
 except cfg.NoSuchOptError:
-LOG.warning(_LW("keymgr.api_class is not set"))
+LOG.warning("keymgr.api_class is not set")

 deprecated_barbican = 'cinder.keymgr.barbican.BarbicanKeyManager'
 barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager'

@@ -72,7 +70,7 @@ def set_overrides(conf):
 should_override = True
 # TODO(kfarr): key_manager.api_class should be set in DevStack, and
 # this block can be removed
-LOG.warning(_LW("key manager not set, using insecure default %s"),
+LOG.warning("key manager not set, using insecure default %s",
 castellan_mock)
 api_class = castellan_mock
@@ -39,7 +39,7 @@ from oslo_config import cfg
 from oslo_log import log as logging

 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _


 key_mgr_opts = [

@@ -67,8 +67,8 @@ class ConfKeyManager(key_manager.KeyManager):

 def __init__(self, configuration):
 if not ConfKeyManager.warning_logged:
-LOG.warning(_LW('This key manager is insecure and is not '
-'recommended for production deployments'))
+LOG.warning('This key manager is insecure and is not '
+'recommended for production deployments')
 ConfKeyManager.warning_logged = True

 super(ConfKeyManager, self).__init__(configuration)

@@ -143,4 +143,4 @@ class ConfKeyManager(key_manager.KeyManager):
 raise exception.KeyManagerError(
 reason="cannot delete non-existent key")

-LOG.warning(_LW("Not deleting key %s"), managed_object_id)
+LOG.warning("Not deleting key %s", managed_object_id)
@@ -62,7 +62,6 @@ from cinder import context
 from cinder import db
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _LE, _LI, _LW
 from cinder import objects
 from cinder import rpc
 from cinder.scheduler import rpcapi as scheduler_rpcapi

@@ -141,7 +140,7 @@ class Manager(base.Base, PeriodicTasks):
 We're utilizing it to reset RPC API version pins to avoid restart of
 the service when rolling upgrade is completed.
 """
-LOG.info(_LI('Resetting cached RPC version pins.'))
+LOG.info('Resetting cached RPC version pins.')
 rpc.LAST_OBJ_VERSIONS = {}
 rpc.LAST_RPC_VERSIONS = {}

@@ -198,9 +197,9 @@ class SchedulerDependentManager(ThreadPoolManager):
 # This means we have Newton's c-sch in the deployment, so
 # rpcapi cannot send the message. We can safely ignore the
 # error. Log it because it shouldn't happen after upgrade.
-msg = _LW("Failed to notify about cinder-volume service "
+msg = ("Failed to notify about cinder-volume service "
 "capabilities for host %(host)s. This is normal "
 "during a live upgrade. Error: %(e)s")
 LOG.warning(msg, {'host': self.host, 'e': e})

 def reset(self):

@@ -210,7 +209,7 @@ class SchedulerDependentManager(ThreadPoolManager):

 class CleanableManager(object):
 def do_cleanup(self, context, cleanup_request):
-LOG.info(_LI('Initiating service %s cleanup'),
+LOG.info('Initiating service %s cleanup',
 cleanup_request.service_id)

 # If the 'until' field in the cleanup request is not set, we default to

@@ -264,8 +263,8 @@ class CleanableManager(object):
 'exp_sts': clean.status,
 'found_sts': vo.status})
 else:
-LOG.info(_LI('Cleaning %(type)s with id %(id)s and status '
-'%(status)s'),
+LOG.info('Cleaning %(type)s with id %(id)s and status '
+'%(status)s',
 {'type': clean.resource_type,
 'id': clean.resource_id,
 'status': clean.status},

@@ -276,7 +275,7 @@ class CleanableManager(object):
 # of it
 keep_entry = self._do_cleanup(context, vo)
 except Exception:
-LOG.exception(_LE('Could not perform cleanup.'))
+LOG.exception('Could not perform cleanup.')
 # Return the worker DB entry to the original service
 db.worker_update(context, clean.id,
 service_id=original_service_id,

@@ -288,10 +287,9 @@ class CleanableManager(object):
 # method doesn't want to keep the entry (for example for delayed
 # deletion).
 if not keep_entry and not db.worker_destroy(context, id=clean.id):
-LOG.warning(_LW('Could not remove worker entry %s.'), clean.id)
+LOG.warning('Could not remove worker entry %s.', clean.id)

-LOG.info(_LI('Service %s cleanup completed.'),
-cleanup_request.service_id)
+LOG.info('Service %s cleanup completed.', cleanup_request.service_id)

 def _do_cleanup(self, ctxt, vo_resource):
 return False
@@ -19,7 +19,6 @@ from oslo_log import log as logging
 from oslo_utils import timeutils

 from cinder.db import base
-from cinder.i18n import _LE, _LI
 from cinder.message import defined_messages

@@ -39,7 +38,7 @@ class API(base.Base):
 def create(self, context, event_id, project_id, resource_type=None,
 resource_uuid=None, level="ERROR"):
 """Create a message with the specified information."""
-LOG.info(_LI("Creating message record for request_id = %s"),
+LOG.info("Creating message record for request_id = %s",
 context.request_id)
 # Ensure valid event_id
 defined_messages.get_message_text(event_id)

@@ -57,8 +56,8 @@ class API(base.Base):
 try:
 self.db.message_create(context, message_record)
 except Exception:
-LOG.exception(_LE("Failed to create message record "
-"for request_id %s"), context.request_id)
+LOG.exception("Failed to create message record "
+"for request_id %s", context.request_id)

 def get(self, context, id):
 """Return message with the specified id."""
@@ -15,7 +15,7 @@ from oslo_log import log as logging

 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import base
 from cinder.objects import fields as c_fields

@@ -149,7 +149,7 @@ class QualityOfServiceSpecs(base.CinderPersistentObject,
 LOG.exception(msg)
 raise exception.Invalid(msg)
 except db_exc.DBError:
-LOG.exception(_LE('DB error occurred when creating QoS specs.'))
+LOG.exception('DB error occurred when creating QoS specs.')
 raise exception.QoSSpecsCreateFailed(name=self.name,
 qos_specs=self.specs)
 # Save ID with the object
@@ -29,7 +29,7 @@ import six
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _
 from cinder import quota_utils

@@ -1044,8 +1044,7 @@ class QuotaEngine(object):
 # usage resynchronization and the reservation expiration
 # mechanisms will resolve the issue. The exception is
 # logged, however, because this is less than optimal.
-LOG.exception(_LE("Failed to commit "
-"reservations %s"), reservations)
+LOG.exception("Failed to commit reservations %s", reservations)

 def rollback(self, context, reservations, project_id=None):
 """Roll back reservations.

@@ -1065,8 +1064,7 @@ class QuotaEngine(object):
 # usage resynchronization and the reservation expiration
 # mechanisms will resolve the issue. The exception is
 # logged, however, because this is less than optimal.
-LOG.exception(_LE("Failed to roll back reservations "
-"%s"), reservations)
+LOG.exception("Failed to roll back reservations %s", reservations)

 def destroy_by_project(self, context, project_id):
 """Destroy all quota limits associated with a project.
@@ -22,7 +22,7 @@ from keystoneclient import exceptions

 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _

 CONF = cfg.CONF
 CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token.__init__',

@@ -265,9 +265,9 @@ def process_reserve_over_quota(context, over_quota_exception,

 for over in overs:
 if 'gigabytes' in over:
-msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+msg = ("Quota exceeded for %(s_pid)s, tried to create "
 "%(s_size)dG %(s_resource)s (%(d_consumed)dG of "
 "%(d_quota)dG already consumed).")
 LOG.warning(msg, {'s_pid': context.project_id,
 's_size': size,
 's_resource': resource[:-1],

@@ -284,9 +284,9 @@ def process_reserve_over_quota(context, over_quota_exception,
 quota=quotas[over])
 if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and
 resource in over):
-msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+msg = ("Quota exceeded for %(s_pid)s, tried to create "
 "%(s_resource)s (%(d_consumed)d %(s_resource)ss "
 "already consumed).")
 LOG.warning(msg, {'s_pid': context.project_id,
 'd_consumed': _consumed(over),
 's_resource': resource[:-1]})
@@ -35,7 +35,7 @@ import six

 import cinder.context
 import cinder.exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import base
 from cinder import utils

@@ -93,7 +93,7 @@ def initialized():
 def cleanup():
 global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
 if NOTIFIER is None:
-LOG.exception(_LE("RPC cleanup: NOTIFIER is None"))
+LOG.exception("RPC cleanup: NOTIFIER is None")
 TRANSPORT.cleanup()
 NOTIFICATION_TRANSPORT.cleanup()
 TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None

@@ -256,8 +256,8 @@ class RPCAPI(object):
 # If there is no service we assume they will come up later and will
 # have the same version as we do.
 version_cap = cls.RPC_API_VERSION
-LOG.info(_LI('Automatically selected %(binary)s RPC version '
-'%(version)s as minimum service version.'),
+LOG.info('Automatically selected %(binary)s RPC version '
+'%(version)s as minimum service version.',
 {'binary': cls.BINARY, 'version': version_cap})
 LAST_RPC_VERSIONS[cls.BINARY] = version_cap
 return version_cap

@@ -274,8 +274,8 @@ class RPCAPI(object):
 # have the same version as we do.
 if not version_cap:
 version_cap = base.OBJ_VERSIONS.get_current()
-LOG.info(_LI('Automatically selected %(binary)s objects version '
-'%(version)s as minimum service version.'),
+LOG.info('Automatically selected %(binary)s objects version '
+'%(version)s as minimum service version.',
 {'binary': cls.BINARY, 'version': version_cap})
 LAST_OBJ_VERSIONS[cls.BINARY] = version_cap
 return version_cap
@@ -17,9 +17,7 @@
 Filter support
 """
 from oslo_log import log as logging
-import six

-from cinder.i18n import _LI
 from cinder.scheduler import base_handler

 LOG = logging.getLogger(__name__)

@@ -69,22 +67,17 @@ class BaseFilterHandler(base_handler.BaseHandler):
 # Log the filtration history
 rspec = filter_properties.get("request_spec", {})
 msg_dict = {"vol_id": rspec.get("volume_id", ""),
-"str_results": six.text_type(full_filter_results),
-}
-full_msg = ("Filtering removed all hosts for the request with "
-"volume ID "
-"'%(vol_id)s'. Filter results: %(str_results)s"
-) % msg_dict
+"str_results": full_filter_results}
+LOG.debug("Filtering removed all hosts for the request with "
+"volume ID '%(vol_id)s'. Filter results: %(str_results)s",
+msg_dict)
 msg_dict["str_results"] = ', '.join(
-_LI("%(cls_name)s: (start: %(start)s, end: %(end)s)") % {
+"%(cls_name)s: (start: %(start)s, end: %(end)s)" % {
 "cls_name": value[0], "start": value[1], "end": value[2]}
 for value in part_filter_results)
-part_msg = _LI("Filtering removed all hosts for the request with "
-"volume ID "
-"'%(vol_id)s'. Filter results: %(str_results)s"
-) % msg_dict
-LOG.debug(full_msg)
-LOG.info(part_msg)
+LOG.info("Filtering removed all hosts for the request with "
+"volume ID '%(vol_id)s'. Filter results: %(str_results)s",
+msg_dict)

 def get_filtered_objects(self, filter_classes, objs,
 filter_properties, index=0):

@@ -115,7 +108,7 @@ class BaseFilterHandler(base_handler.BaseHandler):
 if filter_class.run_filter_for_index(index):
 objs = filter_class.filter_all(list_objs, filter_properties)
 if objs is None:
-LOG.info(_LI("Filter %s returned 0 hosts"), cls_name)
+LOG.info("Filter %s returned 0 hosts", cls_name)
 full_filter_results.append((cls_name, None))
 list_objs = None
 break
@@ -25,7 +25,7 @@ from oslo_log import log as logging
 from oslo_serialization import jsonutils

 from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _
 from cinder.scheduler import driver
 from cinder.scheduler import scheduler_options
 from cinder.volume import utils

@@ -246,8 +246,8 @@ class FilterScheduler(driver.Scheduler):
 return  # no previously attempted hosts, skip

 last_backend = backends[-1]
-LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
-"%(last_backend)s : %(exc)s"),
+LOG.error("Error scheduling %(volume_id)s from last vol-service: "
+"%(last_backend)s : %(exc)s",
 {'volume_id': volume_id,
 'last_backend': last_backend,
 'exc': exc})

@@ -631,8 +631,8 @@ class FilterScheduler(driver.Scheduler):
 if backend_id != group_backend:
 weighed_backends.remove(backend)
 if not weighed_backends:
-LOG.warning(_LW('No weighed backend found for volume '
-'with properties: %s'),
+LOG.warning('No weighed backend found for volume '
+'with properties: %s',
 filter_properties['request_spec'].get('volume_type'))
 return None
 return self._choose_top_backend(weighed_backends, request_spec)
@@ -21,7 +21,6 @@ import math

 from oslo_log import log as logging

-from cinder.i18n import _LE, _LW
 from cinder.scheduler import filters

@@ -63,8 +62,8 @@ class CapacityFilter(filters.BaseBackendFilter):

 if backend_state.free_capacity_gb is None:
 # Fail Safe
-LOG.error(_LE("Free capacity not set: "
-"volume node info collection broken."))
+LOG.error("Free capacity not set: "
+"volume node info collection broken.")
 return False

 free_space = backend_state.free_capacity_gb

@@ -88,9 +87,9 @@ class CapacityFilter(filters.BaseBackendFilter):
 return False
 total = float(total_space)
 if total <= 0:
-LOG.warning(_LW("Insufficient free space for volume creation. "
+LOG.warning("Insufficient free space for volume creation. "
 "Total capacity is %(total).2f on %(grouping)s "
-"%(grouping_name)s."),
+"%(grouping_name)s.",
 {"total": total,
 "grouping": grouping,
 "grouping_name": backend_state.backend_id})

@@ -125,12 +124,12 @@ class CapacityFilter(filters.BaseBackendFilter):
 "grouping": grouping,
 "grouping_name": backend_state.backend_id,
 }
-LOG.warning(_LW(
+LOG.warning(
 "Insufficient free space for thin provisioning. "
 "The ratio of provisioned capacity over total capacity "
 "%(provisioned_ratio).2f has exceeded the maximum over "
 "subscription ratio %(oversub_ratio).2f on %(grouping)s "
-"%(grouping_name)s."), msg_args)
+"%(grouping_name)s.", msg_args)
 return False
 else:
 # Thin provisioning is enabled and projected over-subscription

@@ -143,10 +142,10 @@ class CapacityFilter(filters.BaseBackendFilter):
 free * backend_state.max_over_subscription_ratio)
 return adjusted_free_virtual >= requested_size
 elif thin and backend_state.thin_provisioning_support:
-LOG.warning(_LW("Filtering out %(grouping)s %(grouping_name)s "
+LOG.warning("Filtering out %(grouping)s %(grouping_name)s "
 "with an invalid maximum over subscription ratio "
 "of %(oversub_ratio).2f. The ratio should be a "
-"minimum of 1.0."),
+"minimum of 1.0.",
 {"oversub_ratio":
 backend_state.max_over_subscription_ratio,
 "grouping": grouping,

@@ -159,9 +158,9 @@ class CapacityFilter(filters.BaseBackendFilter):
 "available": free}

 if free < requested_size:
-LOG.warning(_LW("Insufficient free space for volume creation "
+LOG.warning("Insufficient free space for volume creation "
 "on %(grouping)s %(grouping_name)s (requested / "
-"avail): %(requested)s/%(available)s"),
+"avail): %(requested)s/%(available)s",
 msg_args)
 return False
@@ -16,7 +16,6 @@
 from oslo_log import log as logging
 import six

-from cinder.i18n import _LW
 from cinder.scheduler.evaluator import evaluator
 from cinder.scheduler import filters

@@ -60,8 +59,8 @@ class DriverFilter(filters.BaseBackendFilter):
 except Exception as ex:
 # Warn the admin for now that there is an error in the
 # filter function.
-LOG.warning(_LW("Error in filtering function "
-"'%(function)s' : '%(error)s' :: failing backend"),
+LOG.warning("Error in filtering function "
+"'%(function)s' : '%(error)s' :: failing backend",
 {'function': stats['filter_function'],
 'error': ex, })
 return False
@@ -18,7 +18,7 @@ from oslo_utils import uuidutils

 from cinder.compute import nova
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 from cinder.scheduler import filters
 from cinder.volume import utils as volume_utils

@@ -96,8 +96,8 @@ class InstanceLocalityFilter(filters.BaseBackendFilter):
 return self._cache[instance_uuid] == backend

 if not self._nova_has_extended_server_attributes(context):
-LOG.warning(_LW('Hint "%s" dropped because '
-'ExtendedServerAttributes not active in Nova.'),
+LOG.warning('Hint "%s" dropped because '
+'ExtendedServerAttributes not active in Nova.',
 HINT_KEYWORD)
 raise exception.CinderException(_('Hint "%s" not supported.') %
 HINT_KEYWORD)

@@ -107,10 +107,10 @@ class InstanceLocalityFilter(filters.BaseBackendFilter):
 timeout=REQUESTS_TIMEOUT)

 if not hasattr(server, INSTANCE_HOST_PROP):
-LOG.warning(_LW('Hint "%s" dropped because Nova did not return '
+LOG.warning('Hint "%s" dropped because Nova did not return '
 'enough information. Either Nova policy needs to '
 'be changed or a privileged account for Nova '
-'should be specified in conf.'), HINT_KEYWORD)
+'should be specified in conf.', HINT_KEYWORD)
 raise exception.CinderException(_('Hint "%s" not supported.') %
 HINT_KEYWORD)
@@ -17,7 +17,6 @@ from taskflow.patterns import linear_flow

 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _LE
 from cinder.message import api as message_api
 from cinder.message import defined_messages
 from cinder.message import resource_types

@@ -96,7 +95,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
 try:
 self._notify_failure(context, request_spec, cause)
 finally:
-LOG.error(_LE("Failed to run task %(name)s: %(cause)s"),
+LOG.error("Failed to run task %(name)s: %(cause)s",
 {'cause': cause, 'name': self.name})

 @utils.if_notifications_enabled

@@ -114,8 +113,8 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
 rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC,
 payload)
 except exception.CinderException:
-LOG.exception(_LE("Failed notifying on %(topic)s "
-"payload %(payload)s"),
+LOG.exception("Failed notifying on %(topic)s "
+"payload %(payload)s",
 {'topic': self.FAILURE_TOPIC, 'payload': payload})

 def execute(self, context, request_spec, filter_properties, volume):
@@ -29,9 +29,8 @@ from cinder.common import constants
 from cinder import context as cinder_context
 from cinder import exception
 from cinder import objects
-from cinder import utils
-from cinder.i18n import _LI, _LW
 from cinder.scheduler import filters
+from cinder import utils
 from cinder.volume import utils as vol_utils

@@ -484,8 +483,7 @@ class HostManager(object):

 # Ignore older updates
 if capab_old['timestamp'] and timestamp < capab_old['timestamp']:
-LOG.info(_LI('Ignoring old capability report from %s.'),
-backend)
+LOG.info('Ignoring old capability report from %s.', backend)
 return

 # If the capabilites are not changed and the timestamp is older,

@@ -559,7 +557,7 @@ class HostManager(object):
 for service in volume_services.objects:
 host = service.host
 if not service.is_up:
-LOG.warning(_LW("volume service is down. (host: %s)"), host)
+LOG.warning("volume service is down. (host: %s)", host)
 continue

 backend_key = service.service_topic_queue

@@ -601,8 +599,8 @@ class HostManager(object):
 # the map when we are removing it because it has been added to a
 # cluster.
 if backend_key not in active_hosts:
-LOG.info(_LI("Removing non-active backend: %(backend)s from "
-"scheduler cache."), {'backend': backend_key})
+LOG.info("Removing non-active backend: %(backend)s from "
+"scheduler cache.", {'backend': backend_key})
 del self.backend_state_map[backend_key]

 def get_all_backend_states(self, context):
@@ -36,7 +36,7 @@ from cinder import context
 from cinder import db
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import manager
 from cinder import objects
 from cinder import quota

@@ -141,15 +141,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
 request_spec_list,
 filter_properties_list)
 except exception.NoValidBackend:
-LOG.error(_LE("Could not find a backend for consistency group "
-"%(group_id)s."),
+LOG.error("Could not find a backend for consistency group "
+"%(group_id)s.",
 {'group_id': group.id})
 group.status = 'error'
 group.save()
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Failed to create consistency group "
-"%(group_id)s."),
+LOG.exception("Failed to create consistency group "
+"%(group_id)s.",
 {'group_id': group.id})
 group.status = 'error'
 group.save()

@@ -166,15 +166,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
 group_filter_properties,
 filter_properties_list)
 except exception.NoValidBackend:
-LOG.error(_LE("Could not find a backend for group "
-"%(group_id)s."),
+LOG.error("Could not find a backend for group "
+"%(group_id)s.",
 {'group_id': group.id})
 group.status = 'error'
 group.save()
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Failed to create generic group "
-"%(group_id)s."),
+LOG.exception("Failed to create generic group "
+"%(group_id)s.",
 {'group_id': group.id})
 group.status = 'error'
 group.save()

@@ -370,7 +370,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
 request_spec, msg=None):
 # TODO(harlowja): move into a task that just does this later.
 if not msg:
-msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
+msg = ("Failed to schedule_%(method)s: %(ex)s" %
 {'method': method, 'ex': six.text_type(ex)})
 LOG.error(msg)

@@ -445,7 +445,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
 if self.upgrading_cloud:
 raise exception.UnavailableDuringUpgrade(action='workers cleanup')

-LOG.info(_LI('Workers cleanup request started.'))
+LOG.info('Workers cleanup request started.')

 filters = dict(service_id=cleanup_request.service_id,
 cluster_name=cleanup_request.cluster_name,

@@ -475,7 +475,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):

 # If it's a scheduler or the service is up, send the request.
 if not dest or dest.is_up:
-LOG.info(_LI('Sending cleanup for %(binary)s %(dest_name)s.'),
+LOG.info('Sending cleanup for %(binary)s %(dest_name)s.',
 {'binary': service.binary,
 'dest_name': dest_name})
 cleanup_rpc(context, cleanup_request)

@@ -483,11 +483,11 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
 # We don't send cleanup requests when there are no services alive
 # to do the cleanup.
 else:
-LOG.info(_LI('No service available to cleanup %(binary)s '
-'%(dest_name)s.'),
+LOG.info('No service available to cleanup %(binary)s '
+'%(dest_name)s.',
 {'binary': service.binary,
 'dest_name': dest_name})
 not_requested.append(service)

-LOG.info(_LI('Cleanup requests completed.'))
+LOG.info('Cleanup requests completed.')
 return requested, not_requested
@ -28,8 +28,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils

from cinder.i18n import _LE


scheduler_json_config_location_opt = cfg.StrOpt(
'scheduler_json_config_location',
@ -66,8 +64,8 @@ class SchedulerOptions(object):
try:
return os.path.getmtime(filename)
except os.error:
LOG.exception(_LE("Could not stat scheduler options file "
"%(filename)s."),
LOG.exception("Could not stat scheduler options file "
"%(filename)s.",
{'filename': filename})
raise

@ -76,7 +74,7 @@ class SchedulerOptions(object):
try:
return json.load(handle)
except ValueError:
LOG.exception(_LE("Could not decode scheduler options."))
LOG.exception("Could not decode scheduler options.")
return {}

def _get_time_now(self):
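Note that LOG.exception is kept here rather than LOG.error because it also records the traceback of the exception currently being handled. A small self-contained sketch of the options-loading pattern; the default filename is an illustrative assumption, not taken from this file:

```python
import json
import logging

LOG = logging.getLogger(__name__)


def load_options(filename='scheduler.json'):  # hypothetical default name
    """Return the decoded options dict, or {} if the file cannot be parsed."""
    try:
        with open(filename) as handle:
            return json.load(handle)
    except ValueError:
        # LOG.exception logs at ERROR level and attaches the traceback of
        # the exception being handled.
        LOG.exception("Could not decode scheduler options.")
        return {}
```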
@ -15,7 +15,6 @@
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder.i18n import _LW
|
|
||||||
from cinder.scheduler.evaluator import evaluator
|
from cinder.scheduler.evaluator import evaluator
|
||||||
from cinder.scheduler import weights
|
from cinder.scheduler import weights
|
||||||
|
|
||||||
@ -56,17 +55,17 @@ class GoodnessWeigher(weights.BaseHostWeigher):
|
|||||||
goodness_rating = 0
|
goodness_rating = 0
|
||||||
|
|
||||||
if stats['goodness_function'] is None:
|
if stats['goodness_function'] is None:
|
||||||
LOG.warning(_LW("Goodness function not set :: defaulting to "
|
LOG.warning("Goodness function not set :: defaulting to "
|
||||||
"minimal goodness rating of 0"))
|
"minimal goodness rating of 0")
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
goodness_result = self._run_evaluator(
|
goodness_result = self._run_evaluator(
|
||||||
stats['goodness_function'],
|
stats['goodness_function'],
|
||||||
stats)
|
stats)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW("Error in goodness_function function "
|
LOG.warning("Error in goodness_function function "
|
||||||
"'%(function)s' : '%(error)s' :: Defaulting "
|
"'%(function)s' : '%(error)s' :: Defaulting "
|
||||||
"to a goodness of 0"),
|
"to a goodness of 0",
|
||||||
{'function': stats['goodness_function'],
|
{'function': stats['goodness_function'],
|
||||||
'error': ex, })
|
'error': ex, })
|
||||||
return goodness_rating
|
return goodness_rating
|
||||||
@ -75,9 +74,9 @@ class GoodnessWeigher(weights.BaseHostWeigher):
|
|||||||
if goodness_result:
|
if goodness_result:
|
||||||
goodness_rating = 100
|
goodness_rating = 100
|
||||||
elif goodness_result < 0 or goodness_result > 100:
|
elif goodness_result < 0 or goodness_result > 100:
|
||||||
LOG.warning(_LW("Invalid goodness result. Result must be "
|
LOG.warning("Invalid goodness result. Result must be "
|
||||||
"between 0 and 100. Result generated: '%s' "
|
"between 0 and 100. Result generated: '%s' "
|
||||||
":: Defaulting to a goodness of 0"),
|
":: Defaulting to a goodness of 0",
|
||||||
goodness_result)
|
goodness_result)
|
||||||
else:
|
else:
|
||||||
goodness_rating = goodness_result
|
goodness_rating = goodness_result
|
||||||
|
@ -41,7 +41,7 @@ from cinder.common import constants
|
|||||||
from cinder import context
|
from cinder import context
|
||||||
from cinder import coordination
|
from cinder import coordination
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI, _LW
|
from cinder.i18n import _
|
||||||
from cinder import objects
|
from cinder import objects
|
||||||
from cinder.objects import base as objects_base
|
from cinder.objects import base as objects_base
|
||||||
from cinder.objects import fields
|
from cinder.objects import fields
|
||||||
@ -104,15 +104,15 @@ def setup_profiler(binary, host):
|
|||||||
host=host
|
host=host
|
||||||
)
|
)
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
_LW("OSProfiler is enabled.\nIt means that person who knows "
|
"OSProfiler is enabled.\nIt means that person who knows "
|
||||||
"any of hmac_keys that are specified in "
|
"any of hmac_keys that are specified in "
|
||||||
"/etc/cinder/cinder.conf can trace his requests. \n"
|
"/etc/cinder/cinder.conf can trace his requests. \n"
|
||||||
"In real life only operator can read this file so there "
|
"In real life only operator can read this file so there "
|
||||||
"is no security issue. Note that even if person can "
|
"is no security issue. Note that even if person can "
|
||||||
"trigger profiler, only admin user can retrieve trace "
|
"trigger profiler, only admin user can retrieve trace "
|
||||||
"information.\n"
|
"information.\n"
|
||||||
"To disable OSProfiler set in cinder.conf:\n"
|
"To disable OSProfiler set in cinder.conf:\n"
|
||||||
"[profiler]\nenabled=false"))
|
"[profiler]\nenabled=false")
|
||||||
|
|
||||||
|
|
||||||
class Service(service.Service):
|
class Service(service.Service):
|
||||||
@ -183,9 +183,9 @@ class Service(service.Service):
|
|||||||
# TODO(geguileo): In O - Remove self.is_upgrading_to_n part
|
# TODO(geguileo): In O - Remove self.is_upgrading_to_n part
|
||||||
if (service_ref.cluster_name != cluster and
|
if (service_ref.cluster_name != cluster and
|
||||||
not self.is_upgrading_to_n):
|
not self.is_upgrading_to_n):
|
||||||
LOG.info(_LI('This service has been moved from cluster '
|
LOG.info('This service has been moved from cluster '
|
||||||
'%(cluster_svc)s to %(cluster_cfg)s. Resources '
|
'%(cluster_svc)s to %(cluster_cfg)s. Resources '
|
||||||
'will %(opt_no)sbe moved to the new cluster'),
|
'will %(opt_no)sbe moved to the new cluster',
|
||||||
{'cluster_svc': service_ref.cluster_name,
|
{'cluster_svc': service_ref.cluster_name,
|
||||||
'cluster_cfg': cluster,
|
'cluster_cfg': cluster,
|
||||||
'opt_no': '' if self.added_to_cluster else 'NO '})
|
'opt_no': '' if self.added_to_cluster else 'NO '})
|
||||||
@ -231,7 +231,7 @@ class Service(service.Service):
|
|||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
version_string = version.version_string()
|
version_string = version.version_string()
|
||||||
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
|
LOG.info('Starting %(topic)s node (version %(version_string)s)',
|
||||||
{'topic': self.topic, 'version_string': version_string})
|
{'topic': self.topic, 'version_string': version_string})
|
||||||
self.model_disconnected = False
|
self.model_disconnected = False
|
||||||
|
|
||||||
@ -270,8 +270,8 @@ class Service(service.Service):
|
|||||||
|
|
||||||
# TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
|
# TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
|
||||||
if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
|
if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
|
||||||
LOG.info(_LI('Starting %(topic)s cluster %(cluster)s (version '
|
LOG.info('Starting %(topic)s cluster %(cluster)s (version '
|
||||||
'%(version)s)'),
|
'%(version)s)',
|
||||||
{'topic': self.topic, 'version': version_string,
|
{'topic': self.topic, 'version': version_string,
|
||||||
'cluster': self.cluster})
|
'cluster': self.cluster})
|
||||||
target = messaging.Target(
|
target = messaging.Target(
|
||||||
@ -310,11 +310,11 @@ class Service(service.Service):
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
"service_down_time to: %(new_down_time)s",
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
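The warning above fires when the configured service_down_time leaves no headroom over report_interval; the service then raises the global value to 2.5 times the report interval. A standalone sketch of that adjustment, with made-up numbers in place of the real CONF object:

```python
import logging

LOG = logging.getLogger(__name__)

# Illustrative values only; in Cinder these come from CONF.
service_down_time = 60
report_interval = 60

if service_down_time <= report_interval:
    new_down_time = int(report_interval * 2.5)  # 150 with the values above
    LOG.warning("Report interval must be less than service down time. "
                "Current config service_down_time: %(service_down_time)s, "
                "report_interval for this service is: %(report_interval)s. "
                "Setting global service_down_time to: %(new_down_time)s",
                {'service_down_time': service_down_time,
                 'report_interval': report_interval,
                 'new_down_time': new_down_time})
    service_down_time = new_down_time
```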
@ -478,9 +478,9 @@ class Service(service.Service):
|
|||||||
if not self.manager.is_working():
|
if not self.manager.is_working():
|
||||||
# NOTE(dulek): If manager reports a problem we're not sending
|
# NOTE(dulek): If manager reports a problem we're not sending
|
||||||
# heartbeats - to indicate that service is actually down.
|
# heartbeats - to indicate that service is actually down.
|
||||||
LOG.error(_LE('Manager for service %(binary)s %(host)s is '
|
LOG.error('Manager for service %(binary)s %(host)s is '
|
||||||
'reporting problems, not sending heartbeat. '
|
'reporting problems, not sending heartbeat. '
|
||||||
'Service will appear "down".'),
|
'Service will appear "down".',
|
||||||
{'binary': self.binary,
|
{'binary': self.binary,
|
||||||
'host': self.host})
|
'host': self.host})
|
||||||
return
|
return
|
||||||
@ -506,24 +506,24 @@ class Service(service.Service):
|
|||||||
# TODO(termie): make this pattern be more elegant.
|
# TODO(termie): make this pattern be more elegant.
|
||||||
if getattr(self, 'model_disconnected', False):
|
if getattr(self, 'model_disconnected', False):
|
||||||
self.model_disconnected = False
|
self.model_disconnected = False
|
||||||
LOG.error(_LE('Recovered model server connection!'))
|
LOG.error('Recovered model server connection!')
|
||||||
|
|
||||||
except db_exc.DBConnectionError:
|
except db_exc.DBConnectionError:
|
||||||
if not getattr(self, 'model_disconnected', False):
|
if not getattr(self, 'model_disconnected', False):
|
||||||
self.model_disconnected = True
|
self.model_disconnected = True
|
||||||
LOG.exception(_LE('model server went away'))
|
LOG.exception('model server went away')
|
||||||
|
|
||||||
# NOTE(jsbryant) Other DB errors can happen in HA configurations.
|
# NOTE(jsbryant) Other DB errors can happen in HA configurations.
|
||||||
# such errors shouldn't kill this thread, so we handle them here.
|
# such errors shouldn't kill this thread, so we handle them here.
|
||||||
except db_exc.DBError:
|
except db_exc.DBError:
|
||||||
if not getattr(self, 'model_disconnected', False):
|
if not getattr(self, 'model_disconnected', False):
|
||||||
self.model_disconnected = True
|
self.model_disconnected = True
|
||||||
LOG.exception(_LE('DBError encountered: '))
|
LOG.exception('DBError encountered: ')
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
if not getattr(self, 'model_disconnected', False):
|
if not getattr(self, 'model_disconnected', False):
|
||||||
self.model_disconnected = True
|
self.model_disconnected = True
|
||||||
LOG.exception(_LE('Exception encountered: '))
|
LOG.exception('Exception encountered: ')
|
||||||
|
|
||||||
def reset(self):
|
def reset(self):
|
||||||
self.manager.reset()
|
self.manager.reset()
|
||||||
|
@ -27,7 +27,7 @@ import paramiko
|
|||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LI
|
from cinder.i18n import _
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -79,8 +79,8 @@ class SSHPool(pools.Pool):
|
|||||||
|
|
||||||
if 'hosts_key_file' in kwargs.keys():
|
if 'hosts_key_file' in kwargs.keys():
|
||||||
self.hosts_key_file = kwargs.pop('hosts_key_file')
|
self.hosts_key_file = kwargs.pop('hosts_key_file')
|
||||||
LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
|
LOG.info("Secondary ssh hosts key file %(kwargs)s will be "
|
||||||
"loaded along with %(conf)s from /etc/cinder.conf."),
|
"loaded along with %(conf)s from /etc/cinder.conf.",
|
||||||
{'kwargs': self.hosts_key_file,
|
{'kwargs': self.hosts_key_file,
|
||||||
'conf': CONF.ssh_hosts_key_file})
|
'conf': CONF.ssh_hosts_key_file})
|
||||||
|
|
||||||
|
@ -16,9 +16,7 @@
|
|||||||
import mock
|
import mock
|
||||||
|
|
||||||
from cinder.scheduler import base_filter
|
from cinder.scheduler import base_filter
|
||||||
from cinder.scheduler import host_manager
|
|
||||||
from cinder import test
|
from cinder import test
|
||||||
from cinder.tests.unit import fake_constants as fake
|
|
||||||
|
|
||||||
|
|
||||||
class TestBaseFilter(test.TestCase):
|
class TestBaseFilter(test.TestCase):
|
||||||
@ -174,32 +172,3 @@ class TestBaseFilterHandler(test.TestCase):
|
|||||||
result = self._get_filtered_objects(filter_classes, index=2)
|
result = self._get_filtered_objects(filter_classes, index=2)
|
||||||
self.assertEqual(filter_objs_expected, result)
|
self.assertEqual(filter_objs_expected, result)
|
||||||
self.assertEqual(1, fake5_filter_all.call_count)
|
self.assertEqual(1, fake5_filter_all.call_count)
|
||||||
|
|
||||||
def test_get_filtered_objects_info_and_debug_log_none_returned(self):
|
|
||||||
|
|
||||||
all_filters = [FilterA, FilterA, FilterB]
|
|
||||||
fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
|
|
||||||
for x in range(1, 4)]
|
|
||||||
|
|
||||||
filt_props = {"request_spec": {'volume_id': fake.VOLUME_ID,
|
|
||||||
'volume_properties': {'project_id': fake.PROJECT_ID,
|
|
||||||
'size': 2048,
|
|
||||||
'host': 'host4'}}}
|
|
||||||
with mock.patch.object(base_filter, 'LOG') as mock_log:
|
|
||||||
result = self.handler.get_filtered_objects(
|
|
||||||
all_filters, fake_backends, filt_props)
|
|
||||||
self.assertFalse(result)
|
|
||||||
msg = "with volume ID '%s'" % fake.VOLUME_ID
|
|
||||||
# FilterA should leave Host1 and Host2; FilterB should leave None.
|
|
||||||
exp_output = ("FilterA: (start: 3, end: 2), "
|
|
||||||
"FilterA: (start: 2, end: 1)")
|
|
||||||
cargs = mock_log.info.call_args[0][0]
|
|
||||||
self.assertIn(msg, cargs)
|
|
||||||
self.assertIn(exp_output, cargs)
|
|
||||||
|
|
||||||
exp_output = ("[('FilterA', ['fake_be2', 'fake_be3']), "
|
|
||||||
"('FilterA', ['fake_be3']), "
|
|
||||||
+ "('FilterB', None)]")
|
|
||||||
cargs = mock_log.debug.call_args[0][0]
|
|
||||||
self.assertIn(msg, cargs)
|
|
||||||
self.assertIn(exp_output, cargs)
|
|
||||||
|
@ -33,7 +33,6 @@ import six
|
|||||||
|
|
||||||
from cinder import context
|
from cinder import context
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _LW
|
|
||||||
from cinder.objects import fields
|
from cinder.objects import fields
|
||||||
from cinder import test
|
from cinder import test
|
||||||
from cinder.tests.unit import fake_volume
|
from cinder.tests.unit import fake_volume
|
||||||
@ -91,10 +90,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
|
|||||||
reserved_percentage = 100 * int(reserved_ratio)
|
reserved_percentage = 100 * int(reserved_ratio)
|
||||||
|
|
||||||
self.assertEqual(reserved_percentage, result)
|
self.assertEqual(reserved_percentage, result)
|
||||||
msg = _LW('The "netapp_size_multiplier" configuration option is '
|
msg = ('The "netapp_size_multiplier" configuration option is '
|
||||||
'deprecated and will be removed in the Mitaka release. '
|
'deprecated and will be removed in the Mitaka release. '
|
||||||
'Please set "reserved_percentage = %d" instead.') % (
|
'Please set "reserved_percentage = %d" instead.' %
|
||||||
result)
|
result)
|
||||||
mock_report.assert_called_once_with(block_base.LOG, msg)
|
mock_report.assert_called_once_with(block_base.LOG, msg)
|
||||||
|
|
||||||
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
|
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
|
||||||
|
@ -29,7 +29,7 @@ import six
|
|||||||
|
|
||||||
from cinder.db import base
|
from cinder.db import base
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI
|
from cinder.i18n import _
|
||||||
from cinder import objects
|
from cinder import objects
|
||||||
from cinder import quota
|
from cinder import quota
|
||||||
from cinder import quota_utils
|
from cinder import quota_utils
|
||||||
@ -72,7 +72,7 @@ class API(base.Base):
|
|||||||
volume_utils.notify_about_volume_usage(context, volume_ref,
|
volume_utils.notify_about_volume_usage(context, volume_ref,
|
||||||
"transfer.delete.start")
|
"transfer.delete.start")
|
||||||
if volume_ref['status'] != 'awaiting-transfer':
|
if volume_ref['status'] != 'awaiting-transfer':
|
||||||
LOG.error(_LE("Volume in unexpected state"))
|
LOG.error("Volume in unexpected state")
|
||||||
self.db.transfer_destroy(context, transfer_id)
|
self.db.transfer_destroy(context, transfer_id)
|
||||||
volume_utils.notify_about_volume_usage(context, volume_ref,
|
volume_utils.notify_about_volume_usage(context, volume_ref,
|
||||||
"transfer.delete.end")
|
"transfer.delete.end")
|
||||||
@ -115,7 +115,7 @@ class API(base.Base):
|
|||||||
def create(self, context, volume_id, display_name):
|
def create(self, context, volume_id, display_name):
|
||||||
"""Creates an entry in the transfers table."""
|
"""Creates an entry in the transfers table."""
|
||||||
volume_api.check_policy(context, 'create_transfer')
|
volume_api.check_policy(context, 'create_transfer')
|
||||||
LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
|
LOG.info("Generating transfer record for volume %s", volume_id)
|
||||||
volume_ref = self.db.volume_get(context, volume_id)
|
volume_ref = self.db.volume_get(context, volume_id)
|
||||||
if volume_ref['status'] != "available":
|
if volume_ref['status'] != "available":
|
||||||
raise exception.InvalidVolume(reason=_("status must be available"))
|
raise exception.InvalidVolume(reason=_("status must be available"))
|
||||||
@ -137,8 +137,7 @@ class API(base.Base):
try:
transfer = self.db.transfer_create(context, transfer_rec)
except Exception:
LOG.error(_LE("Failed to create transfer record "
"for %s"), volume_id)
LOG.error("Failed to create transfer record for %s", volume_id)
raise
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.create.end")
@ -200,8 +199,8 @@ class API(base.Base):
|
|||||||
**reserve_opts)
|
**reserve_opts)
|
||||||
except Exception:
|
except Exception:
|
||||||
donor_reservations = None
|
donor_reservations = None
|
||||||
LOG.exception(_LE("Failed to update quota donating volume"
|
LOG.exception("Failed to update quota donating volume"
|
||||||
" transfer id %s"), transfer_id)
|
" transfer id %s", transfer_id)
|
||||||
|
|
||||||
volume_utils.notify_about_volume_usage(context, vol_ref,
|
volume_utils.notify_about_volume_usage(context, vol_ref,
|
||||||
"transfer.accept.start")
|
"transfer.accept.start")
|
||||||
@ -219,7 +218,7 @@ class API(base.Base):
|
|||||||
QUOTAS.commit(context, reservations)
|
QUOTAS.commit(context, reservations)
|
||||||
if donor_reservations:
|
if donor_reservations:
|
||||||
QUOTAS.commit(context, donor_reservations, project_id=donor_id)
|
QUOTAS.commit(context, donor_reservations, project_id=donor_id)
|
||||||
LOG.info(_LI("Volume %s has been transferred."), volume_id)
|
LOG.info("Volume %s has been transferred.", volume_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
QUOTAS.rollback(context, reservations)
|
QUOTAS.rollback(context, reservations)
|
||||||
|
@ -53,7 +53,7 @@ import six
|
|||||||
import webob.exc
|
import webob.exc
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LW
|
from cinder.i18n import _
|
||||||
from cinder import keymgr
|
from cinder import keymgr
|
||||||
|
|
||||||
|
|
||||||
@ -398,7 +398,7 @@ def robust_file_write(directory, filename, data):
os.fsync(dirfd)
except OSError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to write persistence file: %(path)s."),
LOG.error("Failed to write persistence file: %(path)s.",
{'path': os.path.join(directory, filename)})
if os.path.isfile(tempname):
os.unlink(tempname)
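The surrounding robust_file_write helper (only partly visible in this hunk) appears to follow the usual atomic-write recipe: write to a temporary file, fsync it, rename it over the destination, fsync the directory, and unlink the temp file if an OSError interrupts. A simplified POSIX sketch of that recipe, without the oslo helpers used in the real code:

```python
import os
import tempfile


def robust_file_write(directory, filename, data):
    """Write data to directory/filename so readers never see a partial file."""
    final_path = os.path.join(directory, filename)
    fd, tempname = tempfile.mkstemp(dir=directory)
    try:
        with os.fdopen(fd, 'w') as tmp:
            tmp.write(data)
            tmp.flush()
            os.fsync(tmp.fileno())
        os.rename(tempname, final_path)          # atomic on POSIX
        dirfd = os.open(directory, os.O_DIRECTORY)
        try:
            os.fsync(dirfd)                      # persist the rename itself
        finally:
            os.close(dirfd)
    except OSError:
        if os.path.isfile(tempname):
            os.unlink(tempname)
        raise
```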
@ -535,7 +535,7 @@ def require_driver_initialized(driver):
|
|||||||
# we can't do anything if the driver didn't init
|
# we can't do anything if the driver didn't init
|
||||||
if not driver.initialized:
|
if not driver.initialized:
|
||||||
driver_name = driver.__class__.__name__
|
driver_name = driver.__class__.__name__
|
||||||
LOG.error(_LE("Volume driver %s not initialized"), driver_name)
|
LOG.error("Volume driver %s not initialized", driver_name)
|
||||||
raise exception.DriverNotInitialized()
|
raise exception.DriverNotInitialized()
|
||||||
else:
|
else:
|
||||||
log_unsupported_driver_warning(driver)
|
log_unsupported_driver_warning(driver)
|
||||||
@ -545,9 +545,9 @@ def log_unsupported_driver_warning(driver):
|
|||||||
"""Annoy the log about unsupported drivers."""
|
"""Annoy the log about unsupported drivers."""
|
||||||
if not driver.supported:
|
if not driver.supported:
|
||||||
# Check to see if the driver is flagged as supported.
|
# Check to see if the driver is flagged as supported.
|
||||||
LOG.warning(_LW("Volume driver (%(driver_name)s %(version)s) is "
|
LOG.warning("Volume driver (%(driver_name)s %(version)s) is "
|
||||||
"currently unsupported and may be removed in the "
|
"currently unsupported and may be removed in the "
|
||||||
"next release of OpenStack. Use at your own risk."),
|
"next release of OpenStack. Use at your own risk.",
|
||||||
{'driver_name': driver.__class__.__name__,
|
{'driver_name': driver.__class__.__name__,
|
||||||
'version': driver.get_version()},
|
'version': driver.get_version()},
|
||||||
resource={'type': 'driver',
|
resource={'type': 'driver',
|
||||||
@ -944,7 +944,7 @@ def setup_tracing(trace_flags):
|
|||||||
except TypeError: # Handle when trace_flags is None or a test mock
|
except TypeError: # Handle when trace_flags is None or a test mock
|
||||||
trace_flags = []
|
trace_flags = []
|
||||||
for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
|
for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
|
||||||
LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
|
LOG.warning('Invalid trace flag: %s', invalid_flag)
|
||||||
TRACE_METHOD = 'method' in trace_flags
|
TRACE_METHOD = 'method' in trace_flags
|
||||||
TRACE_API = 'api' in trace_flags
|
TRACE_API = 'api' in trace_flags
|
||||||
|
|
||||||
|
@ -36,7 +36,7 @@ from cinder import db
|
|||||||
from cinder.db import base
|
from cinder.db import base
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder import flow_utils
|
from cinder import flow_utils
|
||||||
from cinder.i18n import _, _LE, _LI, _LW
|
from cinder.i18n import _
|
||||||
from cinder.image import cache as image_cache
|
from cinder.image import cache as image_cache
|
||||||
from cinder.image import glance
|
from cinder.image import glance
|
||||||
from cinder import keymgr as key_manager
|
from cinder import keymgr as key_manager
|
||||||
@ -170,7 +170,7 @@ class API(base.Base):
|
|||||||
seconds=CONF.az_cache_duration))
|
seconds=CONF.az_cache_duration))
|
||||||
else:
|
else:
|
||||||
azs = self.availability_zones
|
azs = self.availability_zones
|
||||||
LOG.info(_LI("Availability Zones retrieved successfully."))
|
LOG.info("Availability Zones retrieved successfully.")
|
||||||
return tuple(azs)
|
return tuple(azs)
|
||||||
|
|
||||||
def _retype_is_possible(self, context,
|
def _retype_is_possible(self, context,
|
||||||
@ -349,7 +349,7 @@ class API(base.Base):
|
|||||||
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
|
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
|
||||||
flow_engine.run()
|
flow_engine.run()
|
||||||
vref = flow_engine.storage.fetch('volume')
|
vref = flow_engine.storage.fetch('volume')
|
||||||
LOG.info(_LI("Volume created successfully."), resource=vref)
|
LOG.info("Volume created successfully.", resource=vref)
|
||||||
return vref
|
return vref
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -380,8 +380,8 @@ class API(base.Base):
|
|||||||
project_id=project_id,
|
project_id=project_id,
|
||||||
**reserve_opts)
|
**reserve_opts)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Failed to update quota while "
|
LOG.exception("Failed to update quota while "
|
||||||
"deleting volume."))
|
"deleting volume.")
|
||||||
volume.destroy()
|
volume.destroy()
|
||||||
|
|
||||||
if reservations:
|
if reservations:
|
||||||
@ -389,7 +389,7 @@ class API(base.Base):
|
|||||||
|
|
||||||
volume_utils.notify_about_volume_usage(context,
|
volume_utils.notify_about_volume_usage(context,
|
||||||
volume, "delete.end")
|
volume, "delete.end")
|
||||||
LOG.info(_LI("Delete volume request issued successfully."),
|
LOG.info("Delete volume request issued successfully.",
|
||||||
resource={'type': 'volume',
|
resource={'type': 'volume',
|
||||||
'id': volume.id})
|
'id': volume.id})
|
||||||
return
|
return
|
||||||
@ -468,14 +468,14 @@ class API(base.Base):
|
|||||||
try:
|
try:
|
||||||
self.key_manager.delete(context, encryption_key_id)
|
self.key_manager.delete(context, encryption_key_id)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW("Unable to delete encryption key for "
|
LOG.warning("Unable to delete encryption key for "
|
||||||
"volume: %s."), e.msg, resource=volume)
|
"volume: %s.", e.msg, resource=volume)
|
||||||
|
|
||||||
self.volume_rpcapi.delete_volume(context,
|
self.volume_rpcapi.delete_volume(context,
|
||||||
volume,
|
volume,
|
||||||
unmanage_only,
|
unmanage_only,
|
||||||
cascade)
|
cascade)
|
||||||
LOG.info(_LI("Delete volume request issued successfully."),
|
LOG.info("Delete volume request issued successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -488,8 +488,8 @@ class API(base.Base):
|
|||||||
volume = objects.Volume._from_db_object(context, vol_obj, volume)
|
volume = objects.Volume._from_db_object(context, vol_obj, volume)
|
||||||
|
|
||||||
if volume.status == 'maintenance':
|
if volume.status == 'maintenance':
|
||||||
LOG.info(_LI("Unable to update volume, "
|
LOG.info("Unable to update volume, "
|
||||||
"because it is in maintenance."), resource=volume)
|
"because it is in maintenance.", resource=volume)
|
||||||
msg = _("The volume cannot be updated during maintenance.")
|
msg = _("The volume cannot be updated during maintenance.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
|
|
||||||
@ -497,7 +497,7 @@ class API(base.Base):
|
|||||||
|
|
||||||
volume.update(fields)
|
volume.update(fields)
|
||||||
volume.save()
|
volume.save()
|
||||||
LOG.info(_LI("Volume updated successfully."), resource=volume)
|
LOG.info("Volume updated successfully.", resource=volume)
|
||||||
|
|
||||||
def get(self, context, volume_id, viewable_admin_meta=False):
|
def get(self, context, volume_id, viewable_admin_meta=False):
|
||||||
volume = objects.Volume.get_by_id(context, volume_id)
|
volume = objects.Volume.get_by_id(context, volume_id)
|
||||||
@ -516,7 +516,7 @@ class API(base.Base):
|
|||||||
volume.admin_metadata = admin_metadata
|
volume.admin_metadata = admin_metadata
|
||||||
volume.obj_reset_changes()
|
volume.obj_reset_changes()
|
||||||
|
|
||||||
LOG.info(_LI("Volume info retrieved successfully."), resource=volume)
|
LOG.info("Volume info retrieved successfully.", resource=volume)
|
||||||
return volume
|
return volume
|
||||||
|
|
||||||
def get_all(self, context, marker=None, limit=None, sort_keys=None,
|
def get_all(self, context, marker=None, limit=None, sort_keys=None,
|
||||||
@ -565,7 +565,7 @@ class API(base.Base):
|
|||||||
sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
|
sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
|
||||||
offset=offset)
|
offset=offset)
|
||||||
|
|
||||||
LOG.info(_LI("Get all volumes completed successfully."))
|
LOG.info("Get all volumes completed successfully.")
|
||||||
return volumes
|
return volumes
|
||||||
|
|
||||||
def get_volume_summary(self, context, filters=None):
|
def get_volume_summary(self, context, filters=None):
|
||||||
@ -583,7 +583,7 @@ class API(base.Base):
|
|||||||
volumes = objects.VolumeList.get_volume_summary_by_project(
|
volumes = objects.VolumeList.get_volume_summary_by_project(
|
||||||
context, context.project_id)
|
context, context.project_id)
|
||||||
|
|
||||||
LOG.info(_LI("Get summary completed successfully."))
|
LOG.info("Get summary completed successfully.")
|
||||||
return volumes
|
return volumes
|
||||||
|
|
||||||
def get_snapshot(self, context, snapshot_id):
|
def get_snapshot(self, context, snapshot_id):
|
||||||
@ -592,7 +592,7 @@ class API(base.Base):
|
|||||||
|
|
||||||
# FIXME(jdg): The objects don't have the db name entries
|
# FIXME(jdg): The objects don't have the db name entries
|
||||||
# so build the resource tag manually for now.
|
# so build the resource tag manually for now.
|
||||||
LOG.info(_LI("Snapshot retrieved successfully."),
|
LOG.info("Snapshot retrieved successfully.",
|
||||||
resource={'type': 'snapshot',
|
resource={'type': 'snapshot',
|
||||||
'id': snapshot.id})
|
'id': snapshot.id})
|
||||||
return snapshot
|
return snapshot
|
||||||
@ -600,7 +600,7 @@ class API(base.Base):
|
|||||||
def get_volume(self, context, volume_id):
|
def get_volume(self, context, volume_id):
|
||||||
check_policy(context, 'get_volume')
|
check_policy(context, 'get_volume')
|
||||||
volume = objects.Volume.get_by_id(context, volume_id)
|
volume = objects.Volume.get_by_id(context, volume_id)
|
||||||
LOG.info(_LI("Volume retrieved successfully."), resource=volume)
|
LOG.info("Volume retrieved successfully.", resource=volume)
|
||||||
return volume
|
return volume
|
||||||
|
|
||||||
def get_all_snapshots(self, context, search_opts=None, marker=None,
|
def get_all_snapshots(self, context, search_opts=None, marker=None,
|
||||||
@ -621,7 +621,7 @@ class API(base.Base):
|
|||||||
context, context.project_id, search_opts, marker, limit,
|
context, context.project_id, search_opts, marker, limit,
|
||||||
sort_keys, sort_dirs, offset)
|
sort_keys, sort_dirs, offset)
|
||||||
|
|
||||||
LOG.info(_LI("Get all snapshots completed successfully."))
|
LOG.info("Get all snapshots completed successfully.")
|
||||||
return snapshots
|
return snapshots
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -640,7 +640,7 @@ class API(base.Base):
|
|||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
|
|
||||||
LOG.info(_LI("Reserve volume completed successfully."),
|
LOG.info("Reserve volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -658,7 +658,7 @@ class API(base.Base):
|
|||||||
resource=volume)
|
resource=volume)
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.info(_LI("Unreserve volume completed successfully."),
|
LOG.info("Unreserve volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -678,22 +678,22 @@ class API(base.Base):
|
|||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
|
|
||||||
LOG.info(_LI("Begin detaching volume completed successfully."),
|
LOG.info("Begin detaching volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
def roll_detaching(self, context, volume):
|
def roll_detaching(self, context, volume):
|
||||||
volume.conditional_update({'status': 'in-use'},
|
volume.conditional_update({'status': 'in-use'},
|
||||||
{'status': 'detaching'})
|
{'status': 'detaching'})
|
||||||
LOG.info(_LI("Roll detaching of volume completed successfully."),
|
LOG.info("Roll detaching of volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
def attach(self, context, volume, instance_uuid, host_name,
|
def attach(self, context, volume, instance_uuid, host_name,
|
||||||
mountpoint, mode):
|
mountpoint, mode):
|
||||||
if volume.status == 'maintenance':
|
if volume.status == 'maintenance':
|
||||||
LOG.info(_LI('Unable to attach volume, '
|
LOG.info('Unable to attach volume, '
|
||||||
'because it is in maintenance.'), resource=volume)
|
'because it is in maintenance.', resource=volume)
|
||||||
msg = _("The volume cannot be attached in maintenance mode.")
|
msg = _("The volume cannot be attached in maintenance mode.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
|
|
||||||
@ -712,36 +712,36 @@ class API(base.Base):
|
|||||||
host_name,
|
host_name,
|
||||||
mountpoint,
|
mountpoint,
|
||||||
mode)
|
mode)
|
||||||
LOG.info(_LI("Attach volume completed successfully."),
|
LOG.info("Attach volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return attach_results
|
return attach_results
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
def detach(self, context, volume, attachment_id):
|
def detach(self, context, volume, attachment_id):
|
||||||
if volume['status'] == 'maintenance':
|
if volume['status'] == 'maintenance':
|
||||||
LOG.info(_LI('Unable to detach volume, '
|
LOG.info('Unable to detach volume, '
|
||||||
'because it is in maintenance.'), resource=volume)
|
'because it is in maintenance.', resource=volume)
|
||||||
msg = _("The volume cannot be detached in maintenance mode.")
|
msg = _("The volume cannot be detached in maintenance mode.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
detach_results = self.volume_rpcapi.detach_volume(context, volume,
|
detach_results = self.volume_rpcapi.detach_volume(context, volume,
|
||||||
attachment_id)
|
attachment_id)
|
||||||
LOG.info(_LI("Detach volume completed successfully."),
|
LOG.info("Detach volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return detach_results
|
return detach_results
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
def initialize_connection(self, context, volume, connector):
|
def initialize_connection(self, context, volume, connector):
|
||||||
if volume.status == 'maintenance':
|
if volume.status == 'maintenance':
|
||||||
LOG.info(_LI('Unable to initialize the connection for '
|
LOG.info('Unable to initialize the connection for '
|
||||||
'volume, because it is in '
|
'volume, because it is in '
|
||||||
'maintenance.'), resource=volume)
|
'maintenance.', resource=volume)
|
||||||
msg = _("The volume connection cannot be initialized in "
|
msg = _("The volume connection cannot be initialized in "
|
||||||
"maintenance mode.")
|
"maintenance mode.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
init_results = self.volume_rpcapi.initialize_connection(context,
|
init_results = self.volume_rpcapi.initialize_connection(context,
|
||||||
volume,
|
volume,
|
||||||
connector)
|
connector)
|
||||||
LOG.info(_LI("Initialize volume connection completed successfully."),
|
LOG.info("Initialize volume connection completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return init_results
|
return init_results
|
||||||
|
|
||||||
@ -751,22 +751,22 @@ class API(base.Base):
|
|||||||
volume,
|
volume,
|
||||||
connector,
|
connector,
|
||||||
force)
|
force)
|
||||||
LOG.info(_LI("Terminate volume connection completed successfully."),
|
LOG.info("Terminate volume connection completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
self.unreserve_volume(context, volume)
|
self.unreserve_volume(context, volume)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
def accept_transfer(self, context, volume, new_user, new_project):
|
def accept_transfer(self, context, volume, new_user, new_project):
|
||||||
if volume['status'] == 'maintenance':
|
if volume['status'] == 'maintenance':
|
||||||
LOG.info(_LI('Unable to accept transfer for volume, '
|
LOG.info('Unable to accept transfer for volume, '
|
||||||
'because it is in maintenance.'), resource=volume)
|
'because it is in maintenance.', resource=volume)
|
||||||
msg = _("The volume cannot accept transfer in maintenance mode.")
|
msg = _("The volume cannot accept transfer in maintenance mode.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
results = self.volume_rpcapi.accept_transfer(context,
|
results = self.volume_rpcapi.accept_transfer(context,
|
||||||
volume,
|
volume,
|
||||||
new_user,
|
new_user,
|
||||||
new_project)
|
new_project)
|
||||||
LOG.info(_LI("Transfer volume completed successfully."),
|
LOG.info("Transfer volume completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return results
|
return results
|
||||||
|
|
||||||
@ -798,8 +798,8 @@ class API(base.Base):
|
|||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
|
|
||||||
if volume['status'] == 'maintenance':
|
if volume['status'] == 'maintenance':
|
||||||
LOG.info(_LI('Unable to create the snapshot for volume, '
|
LOG.info('Unable to create the snapshot for volume, '
|
||||||
'because it is in maintenance.'), resource=volume)
|
'because it is in maintenance.', resource=volume)
|
||||||
msg = _("The snapshot cannot be created when the volume is in "
|
msg = _("The snapshot cannot be created when the volume is in "
|
||||||
"maintenance mode.")
|
"maintenance mode.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
@ -911,8 +911,8 @@ class API(base.Base):
|
|||||||
check_policy(context, 'create_snapshot', volume)
|
check_policy(context, 'create_snapshot', volume)
|
||||||
|
|
||||||
if volume['status'] == 'maintenance':
|
if volume['status'] == 'maintenance':
|
||||||
LOG.info(_LI('Unable to create the snapshot for volume, '
|
LOG.info('Unable to create the snapshot for volume, '
|
||||||
'because it is in maintenance.'), resource=volume)
|
'because it is in maintenance.', resource=volume)
|
||||||
msg = _("The snapshot cannot be created when the volume is in "
|
msg = _("The snapshot cannot be created when the volume is in "
|
||||||
"maintenance mode.")
|
"maintenance mode.")
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
@ -981,7 +981,7 @@ class API(base.Base):
|
|||||||
result = self._create_snapshot(context, volume, name, description,
|
result = self._create_snapshot(context, volume, name, description,
|
||||||
False, metadata, cgsnapshot_id,
|
False, metadata, cgsnapshot_id,
|
||||||
group_snapshot_id)
|
group_snapshot_id)
|
||||||
LOG.info(_LI("Snapshot create request issued successfully."),
|
LOG.info("Snapshot create request issued successfully.",
|
||||||
resource=result)
|
resource=result)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -990,7 +990,7 @@ class API(base.Base):
|
|||||||
description, metadata=None):
|
description, metadata=None):
|
||||||
result = self._create_snapshot(context, volume, name, description,
|
result = self._create_snapshot(context, volume, name, description,
|
||||||
True, metadata)
|
True, metadata)
|
||||||
LOG.info(_LI("Snapshot force create request issued successfully."),
|
LOG.info("Snapshot force create request issued successfully.",
|
||||||
resource=result)
|
resource=result)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -1021,7 +1021,7 @@ class API(base.Base):
|
|||||||
raise exception.InvalidSnapshot(reason=msg)
|
raise exception.InvalidSnapshot(reason=msg)
|
||||||
|
|
||||||
self.volume_rpcapi.delete_snapshot(context, snapshot, unmanage_only)
|
self.volume_rpcapi.delete_snapshot(context, snapshot, unmanage_only)
|
||||||
LOG.info(_LI("Snapshot delete request issued successfully."),
|
LOG.info("Snapshot delete request issued successfully.",
|
||||||
resource=snapshot)
|
resource=snapshot)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -1033,7 +1033,7 @@ class API(base.Base):
|
|||||||
def get_volume_metadata(self, context, volume):
|
def get_volume_metadata(self, context, volume):
|
||||||
"""Get all metadata associated with a volume."""
|
"""Get all metadata associated with a volume."""
|
||||||
rv = self.db.volume_metadata_get(context, volume['id'])
|
rv = self.db.volume_metadata_get(context, volume['id'])
|
||||||
LOG.info(_LI("Get volume metadata completed successfully."),
|
LOG.info("Get volume metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return dict(rv)
|
return dict(rv)
|
||||||
|
|
||||||
@ -1042,7 +1042,7 @@ class API(base.Base):
|
|||||||
"""Creates volume metadata."""
|
"""Creates volume metadata."""
|
||||||
db_meta = self._update_volume_metadata(context, volume, metadata)
|
db_meta = self._update_volume_metadata(context, volume, metadata)
|
||||||
|
|
||||||
LOG.info(_LI("Create volume metadata completed successfully."),
|
LOG.info("Create volume metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return db_meta
|
return db_meta
|
||||||
|
|
||||||
@ -1056,7 +1056,7 @@ class API(base.Base):
|
|||||||
LOG.info(msg, resource=volume)
|
LOG.info(msg, resource=volume)
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
self.db.volume_metadata_delete(context, volume.id, key, meta_type)
|
self.db.volume_metadata_delete(context, volume.id, key, meta_type)
|
||||||
LOG.info(_LI("Delete volume metadata completed successfully."),
|
LOG.info("Delete volume metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
|
|
||||||
def _update_volume_metadata(self, context, volume, metadata, delete=False,
|
def _update_volume_metadata(self, context, volume, metadata, delete=False,
|
||||||
@ -1084,7 +1084,7 @@ class API(base.Base):
|
|||||||
|
|
||||||
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
||||||
|
|
||||||
LOG.info(_LI("Update volume metadata completed successfully."),
|
LOG.info("Update volume metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return db_meta
|
return db_meta
|
||||||
|
|
||||||
@ -1092,7 +1092,7 @@ class API(base.Base):
|
|||||||
def get_volume_admin_metadata(self, context, volume):
|
def get_volume_admin_metadata(self, context, volume):
|
||||||
"""Get all administration metadata associated with a volume."""
|
"""Get all administration metadata associated with a volume."""
|
||||||
rv = self.db.volume_admin_metadata_get(context, volume['id'])
|
rv = self.db.volume_admin_metadata_get(context, volume['id'])
|
||||||
LOG.info(_LI("Get volume admin metadata completed successfully."),
|
LOG.info("Get volume admin metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return dict(rv)
|
return dict(rv)
|
||||||
|
|
||||||
@ -1112,7 +1112,7 @@ class API(base.Base):
|
|||||||
|
|
||||||
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
||||||
|
|
||||||
LOG.info(_LI("Update volume admin metadata completed successfully."),
|
LOG.info("Update volume admin metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return db_meta
|
return db_meta
|
||||||
|
|
||||||
@ -1120,7 +1120,7 @@ class API(base.Base):
|
|||||||
def get_snapshot_metadata(self, context, snapshot):
|
def get_snapshot_metadata(self, context, snapshot):
|
||||||
"""Get all metadata associated with a snapshot."""
|
"""Get all metadata associated with a snapshot."""
|
||||||
snapshot_obj = self.get_snapshot(context, snapshot.id)
|
snapshot_obj = self.get_snapshot(context, snapshot.id)
|
||||||
LOG.info(_LI("Get snapshot metadata completed successfully."),
|
LOG.info("Get snapshot metadata completed successfully.",
|
||||||
resource=snapshot)
|
resource=snapshot)
|
||||||
return snapshot_obj.metadata
|
return snapshot_obj.metadata
|
||||||
|
|
||||||
@ -1129,7 +1129,7 @@ class API(base.Base):
|
|||||||
"""Delete the given metadata item from a snapshot."""
|
"""Delete the given metadata item from a snapshot."""
|
||||||
snapshot_obj = self.get_snapshot(context, snapshot.id)
|
snapshot_obj = self.get_snapshot(context, snapshot.id)
|
||||||
snapshot_obj.delete_metadata_key(context, key)
|
snapshot_obj.delete_metadata_key(context, key)
|
||||||
LOG.info(_LI("Delete snapshot metadata completed successfully."),
|
LOG.info("Delete snapshot metadata completed successfully.",
|
||||||
resource=snapshot)
|
resource=snapshot)
|
||||||
|
|
||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
@ -1156,12 +1156,12 @@ class API(base.Base):
|
|||||||
|
|
||||||
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
||||||
|
|
||||||
LOG.info(_LI("Update snapshot metadata completed successfully."),
|
LOG.info("Update snapshot metadata completed successfully.",
|
||||||
resource=snapshot)
|
resource=snapshot)
|
||||||
return snapshot.metadata
|
return snapshot.metadata
|
||||||
|
|
||||||
def get_snapshot_metadata_value(self, snapshot, key):
|
def get_snapshot_metadata_value(self, snapshot, key):
|
||||||
LOG.info(_LI("Get snapshot metadata value not implemented."),
|
LOG.info("Get snapshot metadata value not implemented.",
|
||||||
resource=snapshot)
|
resource=snapshot)
|
||||||
# FIXME(jdg): Huh? Pass?
|
# FIXME(jdg): Huh? Pass?
|
||||||
pass
|
pass
|
||||||
@ -1178,7 +1178,7 @@ class API(base.Base):
|
|||||||
@wrap_check_policy
|
@wrap_check_policy
|
||||||
def get_volume_image_metadata(self, context, volume):
|
def get_volume_image_metadata(self, context, volume):
|
||||||
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
|
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
|
||||||
LOG.info(_LI("Get volume image-metadata completed successfully."),
|
LOG.info("Get volume image-metadata completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return {meta_entry.key: meta_entry.value for meta_entry in db_data}
|
return {meta_entry.key: meta_entry.value for meta_entry in db_data}
|
||||||
|
|
||||||
@ -1195,8 +1195,8 @@ class API(base.Base):
|
|||||||
def copy_volume_to_image(self, context, volume, metadata, force):
|
def copy_volume_to_image(self, context, volume, metadata, force):
|
||||||
"""Create a new image from the specified volume."""
|
"""Create a new image from the specified volume."""
|
||||||
if not CONF.enable_force_upload and force:
|
if not CONF.enable_force_upload and force:
|
||||||
LOG.info(_LI("Force upload to image is disabled, "
|
LOG.info("Force upload to image is disabled, "
|
||||||
"Force option will be ignored."),
|
"Force option will be ignored.",
|
||||||
resource={'type': 'volume', 'id': volume['id']})
|
resource={'type': 'volume', 'id': volume['id']})
|
||||||
force = False
|
force = False
|
||||||
|
|
||||||
@ -1262,7 +1262,7 @@ class API(base.Base):
|
|||||||
response['is_public'] = recv_metadata.get('is_public')
|
response['is_public'] = recv_metadata.get('is_public')
|
||||||
elif 'visibility' in recv_metadata:
|
elif 'visibility' in recv_metadata:
|
||||||
response['visibility'] = recv_metadata.get('visibility')
|
response['visibility'] = recv_metadata.get('visibility')
|
||||||
LOG.info(_LI("Copy volume to image completed successfully."),
|
LOG.info("Copy volume to image completed successfully.",
|
||||||
resource=volume)
|
resource=volume)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
@ -1272,7 +1272,7 @@ class API(base.Base):
expected = {'status': 'available'}

def _roll_back_status():
msg = _LE('Could not return volume %s to available.')
msg = _('Could not return volume %s to available.')
try:
if not volume.conditional_update(expected, value):
LOG.error(msg, volume.id)
@ -1323,13 +1323,13 @@ class API(base.Base):
gb_quotas = exc.kwargs['quotas']['gigabytes']

consumed = gigabytes['reserved'] + gigabytes['in_use']
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
LOG.error("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).",
{'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': consumed,
'd_quota': gb_quotas})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase, consumed=consumed, quota=gb_quotas)
finally:
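This hunk also folds the old two-step msg = _LE(...) / LOG.error(msg, ...) into a single LOG.error call; the consumed figure it reports is simply reserved plus in-use gigabytes taken from the quota exception. A self-contained sketch with hypothetical numbers standing in for exc.kwargs:

```python
import logging

LOG = logging.getLogger(__name__)

# Hypothetical figures standing in for exc.kwargs['quotas'] contents.
gigabytes = {'reserved': 10, 'in_use': 950}
gb_quotas = 1000
size_increase = 100
project_id = 'example-project'

consumed = gigabytes['reserved'] + gigabytes['in_use']  # 960 here
LOG.error("Quota exceeded for %(s_pid)s, tried to extend volume "
          "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
          "already consumed).",
          {'s_pid': project_id,
           's_size': size_increase,
           'd_consumed': consumed,
           'd_quota': gb_quotas})
```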
@ -1357,15 +1357,15 @@ class API(base.Base):
# NOTE(erlon): During rolling upgrades scheduler and volume can
# have different versions. This check makes sure that a new
# version of the volume service won't break.
msg = _LW("Failed to send extend volume request to scheduler. "
msg = ("Failed to send extend volume request to scheduler. "
"Falling back to old behaviour. This is normal during a "
"live-upgrade. Error: %(e)s")
LOG.warning(msg, {'e': e})
# TODO(erlon): Remove in Pike
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)

LOG.info(_LI("Extend volume request issued successfully."),
LOG.info("Extend volume request issued successfully.",
resource=volume)

@wrap_check_policy
@ -1451,7 +1451,7 @@ class API(base.Base):
|
|||||||
 cluster_name or host,
 force_copy,
 request_spec)
-LOG.info(_LI("Migrate volume request issued successfully."),
+LOG.info("Migrate volume request issued successfully.",
 resource=volume)

 @wrap_check_policy
@@ -1490,7 +1490,7 @@ class API(base.Base):
 'exp': expected_status})
 raise exception.InvalidVolume(reason=msg)

-LOG.info(_LI("Migrate volume completion issued successfully."),
+LOG.info("Migrate volume completion issued successfully.",
 resource=volume)
 return self.volume_rpcapi.migrate_volume_completion(context, volume,
 new_volume, error)
@@ -1505,8 +1505,8 @@ class API(base.Base):
 raise exception.InvalidVolume(reason=msg)
 self.update_volume_admin_metadata(context.elevated(), volume,
 {'readonly': six.text_type(flag)})
-LOG.info(_LI("Update readonly setting on volume "
-"completed successfully."),
+LOG.info("Update readonly setting on volume "
+"completed successfully.",
 resource=volume)

 @wrap_check_policy
@@ -1592,7 +1592,7 @@ class API(base.Base):
 self.scheduler_rpcapi.retype(context, volume,
 request_spec=request_spec,
 filter_properties={})
-LOG.info(_LI("Retype volume request issued successfully."),
+LOG.info("Retype volume request issued successfully.",
 resource=volume)

 def _get_service_by_host_cluster(self, context, host, cluster_name,
@@ -1613,20 +1613,20 @@ class API(base.Base):
 cluster_name=svc_cluster)
 except exception.ServiceNotFound:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Unable to find service: %(service)s for '
-'given host: %(host)s and cluster %(cluster)s.'),
+LOG.error('Unable to find service: %(service)s for '
+'given host: %(host)s and cluster %(cluster)s.',
 {'service': constants.VOLUME_BINARY, 'host': host,
 'cluster': cluster_name})

 if service.disabled and (not service.cluster_name or
 service.cluster.disabled):
-LOG.error(_LE('Unable to manage existing %s on a disabled '
-'service.'), resource)
+LOG.error('Unable to manage existing %s on a disabled '
+'service.', resource)
 raise exception.ServiceUnavailable()

 if not service.is_up:
-LOG.error(_LE('Unable to manage existing %s on a service that is '
-'down.'), resource)
+LOG.error('Unable to manage existing %s on a service that is '
+'down.', resource)
 raise exception.ServiceUnavailable()

 return service
@@ -1673,7 +1673,7 @@ class API(base.Base):
 with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
 flow_engine.run()
 vol_ref = flow_engine.storage.fetch('volume')
-LOG.info(_LI("Manage volume request issued successfully."),
+LOG.info("Manage volume request issued successfully.",
 resource=vol_ref)
 return vol_ref

@@ -1791,7 +1791,7 @@ class API(base.Base):
 cluster.save()
 raise exception.InvalidInput(
 reason=_('No service could be changed: %s') % msg)
-LOG.warning(_LW('Some services could not be changed: %s'), msg)
+LOG.warning('Some services could not be changed: %s', msg)

 return cluster, services

@@ -26,7 +26,7 @@ from oslo_utils import excutils
 import six

 from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _
 from cinder.image import image_utils
 from cinder import objects
 from cinder.objects import fields
@@ -424,8 +424,8 @@ class BaseVD(object):
 self._is_non_recoverable(ex.stderr, non_recoverable):
 raise

-LOG.exception(_LE("Recovering from a failed execute. "
-"Try number %s"), tries)
+LOG.exception("Recovering from a failed execute. "
+"Try number %s", tries)
 time.sleep(tries ** 2)

 def _detach_volume(self, context, attach_info, volume, properties,
@@ -458,8 +458,8 @@ class BaseVD(object):
 LOG.debug("volume %s: removing export", volume['id'])
 self.remove_export(context, volume)
 except Exception as ex:
-LOG.exception(_LE("Error detaching volume %(volume)s, "
-"due to remove export failure."),
+LOG.exception("Error detaching volume %(volume)s, "
+"due to remove export failure.",
 {"volume": volume['id']})
 raise exception.RemoveExportException(volume=volume['id'],
 reason=ex)
@@ -480,8 +480,8 @@ class BaseVD(object):
 # flag in the interface is for anticipation that it will be enabled
 # in the future.
 if remote:
-LOG.error(_LE("Detaching snapshot from a remote node "
-"is not supported."))
+LOG.error("Detaching snapshot from a remote node "
+"is not supported.")
 raise exception.NotSupportedOperation(
 operation=_("detach snapshot from remote node"))
 else:
@@ -501,8 +501,8 @@ class BaseVD(object):
 LOG.debug("Snapshot %s: removing export.", snapshot.id)
 self.remove_export_snapshot(context, snapshot)
 except Exception as ex:
-LOG.exception(_LE("Error detaching snapshot %(snapshot)s, "
-"due to remove export failure."),
+LOG.exception("Error detaching snapshot %(snapshot)s, "
+"due to remove export failure.",
 {"snapshot": snapshot.id})
 raise exception.RemoveExportException(volume=snapshot.id,
 reason=ex)
@@ -532,8 +532,8 @@ class BaseVD(object):
 self._throttle = throttling.BlkioCgroup(int(bps_limit),
 cgroup_name)
 except processutils.ProcessExecutionError as err:
-LOG.warning(_LW('Failed to activate volume copy throttling: '
-'%(err)s'), {'err': err})
+LOG.warning('Failed to activate volume copy throttling: '
+'%(err)s', {'err': err})
 throttling.Throttle.set_default(self._throttle)

 def get_version(self):
@@ -737,9 +737,9 @@ class BaseVD(object):
 if ':' in vendor_name:
 old_name = vendor_name
 vendor_name = vendor_name.replace(':', '_')
-LOG.warning(_LW('The colon in vendor name was replaced '
+LOG.warning('The colon in vendor name was replaced '
 'by underscore. Updated vendor name is '
-'%(name)s".'), {'name': vendor_name})
+'%(name)s".', {'name': vendor_name})

 for key in vendor_prop:
 # If key has colon in vendor name field, we replace it to
@@ -751,10 +751,10 @@ class BaseVD(object):
 updated_vendor_prop[new_key] = vendor_prop[key]
 continue
 if not key.startswith(vendor_name + ':'):
-LOG.warning(_LW('Vendor unique property "%(property)s" '
+LOG.warning('Vendor unique property "%(property)s" '
 'must start with vendor prefix with colon '
 '"%(prefix)s". The property was '
-'not registered on capabilities list.'),
+'not registered on capabilities list.',
 {'prefix': vendor_name + ':',
 'property': key})
 continue
@@ -952,9 +952,9 @@ class BaseVD(object):
 rpcapi.terminate_connection(context, volume,
 properties, force=True)
 except Exception:
-LOG.warning(_LW("Failed terminating the connection "
+LOG.warning("Failed terminating the connection "
 "of volume %(volume_id)s, but it is "
-"acceptable."),
+"acceptable.",
 {'volume_id': volume['id']})
 else:
 # Call local driver's create_export and initialize_connection.
@@ -969,9 +969,9 @@ class BaseVD(object):
 volume.save()
 except exception.CinderException as ex:
 if model_update:
-LOG.exception(_LE("Failed updating model of volume "
+LOG.exception("Failed updating model of volume "
 "%(volume_id)s with driver provided "
-"model %(model)s"),
+"model %(model)s",
 {'volume_id': volume['id'],
 'model': model_update})
 raise exception.ExportFailure(reason=ex)
@@ -1008,7 +1008,7 @@ class BaseVD(object):
 properties, force=True,
 remote=remote)
 except Exception:
-LOG.exception(_LE('Error detaching volume %s'),
+LOG.exception('Error detaching volume %s',
 volume['id'])
 raise

@@ -1024,8 +1024,8 @@ class BaseVD(object):
 # flag in the interface is for anticipation that it will be enabled
 # in the future.
 if remote:
-LOG.error(_LE("Attaching snapshot from a remote node "
-"is not supported."))
+LOG.error("Attaching snapshot from a remote node "
+"is not supported.")
 raise exception.NotSupportedOperation(
 operation=_("attach snapshot from remote node"))
 else:
@@ -1045,9 +1045,9 @@ class BaseVD(object):
 snapshot.save()
 except exception.CinderException as ex:
 if model_update:
-LOG.exception(_LE("Failed updating model of snapshot "
+LOG.exception("Failed updating model of snapshot "
 "%(snapshot_id)s with driver provided "
-"model %(model)s."),
+"model %(model)s.",
 {'snapshot_id': snapshot.id,
 'model': model_update})
 raise exception.ExportFailure(reason=ex)
@@ -1094,7 +1094,7 @@ class BaseVD(object):
 unavailable = not connector.check_valid_device(host_device,
 root_access)
 except Exception:
-LOG.exception(_LE('Could not validate device %s'), host_device)
+LOG.exception('Could not validate device %s', host_device)

 if unavailable:
 raise exception.DeviceUnavailable(path=host_device,
@@ -2612,8 +2612,7 @@ class ISCSIDriver(VolumeDriver):
 def _do_iscsi_discovery(self, volume):
 # TODO(justinsb): Deprecate discovery and use stored info
 # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
-LOG.warning(_LW("ISCSI provider_location not "
-"stored, using discovery"))
+LOG.warning("ISCSI provider_location not stored, using discovery")

 volume_name = volume['name']

@@ -2626,7 +2625,7 @@ class ISCSIDriver(VolumeDriver):
 volume['host'].split('@')[0],
 run_as_root=True)
 except processutils.ProcessExecutionError as ex:
-LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
+LOG.error("ISCSI discovery attempt failed for:%s",
 volume['host'].split('@')[0])
 LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
 return None
@@ -2815,8 +2814,8 @@ class ISCSIDriver(VolumeDriver):
 # iSCSI drivers require the initiator information
 required = 'initiator'
 if required not in connector:
-LOG.error(_LE('The volume driver requires %(data)s '
-'in the connector.'), {'data': required})
+LOG.error('The volume driver requires %(data)s '
+'in the connector.', {'data': required})
 raise exception.InvalidConnectorException(missing=required)

 def terminate_connection(self, volume, connector, **kwargs):
@@ -2969,9 +2968,9 @@ class FibreChannelDriver(VolumeDriver):
 def validate_connector_has_setting(connector, setting):
 """Test for non-empty setting in connector."""
 if setting not in connector or not connector[setting]:
-LOG.error(_LE(
+LOG.error(
 "FibreChannelDriver validate_connector failed. "
-"No '%(setting)s'. Make sure HBA state is Online."),
+"No '%(setting)s'. Make sure HBA state is Online.",
 {'setting': setting})
 raise exception.InvalidConnectorException(missing=setting)

@@ -17,7 +17,6 @@ from oslo_log import log as logging

 from cinder import context
 from cinder import exception
-from cinder.i18n import _LE

 LOG = logging.getLogger(__name__)

@@ -41,9 +40,9 @@ class VolumeDriverUtils(object):
 self._data_namespace
 )
 except exception.CinderException:
-LOG.exception(_LE("Failed to get driver initiator data for"
+LOG.exception("Failed to get driver initiator data for"
 " initiator %(initiator)s and namespace"
-" %(namespace)s"),
+" %(namespace)s",
 {'initiator': initiator,
 'namespace': self._data_namespace})
 raise
@@ -63,9 +62,9 @@ class VolumeDriverUtils(object):
 value
 )
 except exception.CinderException:
-LOG.exception(_LE("Failed to insert initiator data for"
+LOG.exception("Failed to insert initiator data for"
 " initiator %(initiator)s and backend"
-" %(backend)s for key %(key)s."),
+" %(backend)s for key %(key)s.",
 {'initiator': initiator,
 'backend': self._data_namespace,
 'key': key})

@@ -23,7 +23,7 @@ from oslo_utils import units

 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LI, _LW
+from cinder.i18n import _
 from cinder.image import image_utils
 from cinder import interface
 from cinder import objects
@@ -87,7 +87,7 @@ class BlockDeviceDriver(driver.BaseVD,
 @utils.synchronized('block_device', external=True)
 def create_volume(self, volume):
 device = self.find_appropriate_size_device(volume.size)
-LOG.info(_LI("Creating %(volume)s on %(device)s"),
+LOG.info("Creating %(volume)s on %(device)s",
 {"volume": volume.name, "device": device})
 self._update_provider_location(volume, device)

@@ -109,7 +109,7 @@ class BlockDeviceDriver(driver.BaseVD,
 volume_clear=self.configuration.volume_clear,
 volume_clear_size=self.configuration.volume_clear_size)
 else:
-LOG.warning(_LW("The device %s won't be cleared."), device)
+LOG.warning("The device %s won't be cleared.", device)

 if device.status == "error_deleting":
 msg = _("Failed to delete device.")
@@ -141,7 +141,7 @@ class BlockDeviceDriver(driver.BaseVD,

 @utils.synchronized('block_device', external=True)
 def create_cloned_volume(self, volume, src_vref):
-LOG.info(_LI('Creating clone of volume: %s.'), src_vref.id)
+LOG.info('Creating clone of volume: %s.', src_vref.id)
 device = self.find_appropriate_size_device(src_vref.size)
 dev_size = self._get_devices_sizes([device])
 volutils.copy_volume(
@@ -260,7 +260,7 @@ class BlockDeviceDriver(driver.BaseVD,
 LOG.error(msg, resource=volume)
 raise exception.CinderException(msg)

-LOG.info(_LI('Creating volume snapshot: %s.'), snapshot.id)
+LOG.info('Creating volume snapshot: %s.', snapshot.id)
 device = self.find_appropriate_size_device(snapshot.volume_size)
 dev_size = self._get_devices_sizes([device])
 volutils.copy_volume(
@@ -275,7 +275,7 @@ class BlockDeviceDriver(driver.BaseVD,

 @utils.synchronized('block_device', external=True)
 def create_volume_from_snapshot(self, volume, snapshot):
-LOG.info(_LI('Creating volume %s from snapshot.'), volume.id)
+LOG.info('Creating volume %s from snapshot.', volume.id)
 device = self.find_appropriate_size_device(snapshot.volume_size)
 dev_size = self._get_devices_sizes([device])
 volutils.copy_volume(

@@ -29,8 +29,6 @@ import six
 from cinder import context
 from cinder import exception
 from cinder.i18n import _
-from cinder.i18n import _LE
-from cinder.i18n import _LI
 from cinder.objects import fields
 from cinder.volume.drivers.coprhd.helpers import (
 authentication as coprhd_auth)
@@ -254,7 +252,7 @@ class EMCCoprHDDriverCommon(object):
 coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") %
 {'name': name, 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : %s creation failed") % name)
+log_err_msg = ("Volume : %s creation failed" % name)
 self._raise_or_log_exception(
 e.err_code, coprhd_err_msg, log_err_msg)

@@ -283,7 +281,7 @@ class EMCCoprHDDriverCommon(object):
 " create failed\n%(err)s") %
 {'name': name, 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Consistency Group : %s creation failed") %
+log_err_msg = ("Consistency Group : %s creation failed" %
 name)
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)
@@ -321,7 +319,7 @@ class EMCCoprHDDriverCommon(object):
 " update failed\n%(err)s") %
 {'cg_uri': cg_uri, 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Consistency Group : %s update failed") %
+log_err_msg = ("Consistency Group : %s update failed" %
 cg_uri)
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)
@@ -357,7 +355,7 @@ class EMCCoprHDDriverCommon(object):

 volumes_model_update.append(update_item)

-LOG.exception(_LE("Failed to delete the volume %s of CG."),
+LOG.exception("Failed to delete the volume %s of CG.",
 vol['name'])

 self.consistencygroup_obj.delete(
@@ -375,7 +373,7 @@ class EMCCoprHDDriverCommon(object):
 " delete failed\n%(err)s") %
 {'name': name, 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Consistency Group : %s deletion failed") %
+log_err_msg = ("Consistency Group : %s deletion failed" %
 name)
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)
@@ -395,8 +393,8 @@ class EMCCoprHDDriverCommon(object):
 coprhd_cgid = self._get_coprhd_cgid(cg_id)
 cg_name = self._get_consistencygroup_name(cg_group)

-LOG.info(_LI('Start to create cgsnapshot for consistency group'
-': %(group_name)s'),
+LOG.info('Start to create cgsnapshot for consistency group'
+': %(group_name)s',
 {'group_name': cg_name})

 try:
@@ -484,8 +482,8 @@ class EMCCoprHDDriverCommon(object):
 {'cg_name': cg_name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Snapshot %(name)s for Consistency"
-" Group: %(cg_name)s creation failed") %
+log_err_msg = ("Snapshot %(name)s for Consistency"
+" Group: %(cg_name)s creation failed" %
 {'cg_name': cg_name,
 'name': cgsnapshot_name})
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
@@ -505,9 +503,9 @@ class EMCCoprHDDriverCommon(object):
 cg_name = self._get_consistencygroup_name(cg_group)

 model_update = {}
-LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
-'%(group_name)s'), {'snap_name': cgsnapshot['name'],
+LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: '
+'%(group_name)s', {'snap_name': cgsnapshot['name'],
 'group_name': cg_name})

 try:
 uri = None
@@ -545,8 +543,8 @@ class EMCCoprHDDriverCommon(object):
 'cg_name': cg_name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Snapshot %(name)s for Consistency"
-" Group: %(cg_name)s deletion failed") %
+log_err_msg = ("Snapshot %(name)s for Consistency"
+" Group: %(cg_name)s deletion failed" %
 {'cg_name': cg_name,
 'name': cgsnapshot_name})
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
@@ -618,10 +616,9 @@ class EMCCoprHDDriverCommon(object):
 "%s:%s:%s" % (self.OPENSTACK_TAG, prop,
 six.text_type(value)))
 except TypeError:
-LOG.error(
-_LE("Error tagging the resource property %s"), prop)
+LOG.error("Error tagging the resource property %s", prop)
 except TypeError:
-LOG.error(_LE("Error tagging the resource properties"))
+LOG.error("Error tagging the resource properties")

 try:
 self.tag_obj.tag_resource(
@@ -683,13 +680,13 @@ class EMCCoprHDDriverCommon(object):
 "", full_project_name, name, True)

 except IndexError:
-LOG.exception(_LE("Volume clone detach returned empty task list"))
+LOG.exception("Volume clone detach returned empty task list")

 except coprhd_utils.CoprHdError as e:
 coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") %
 {'name': name, 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : {%s} clone failed") % name)
+log_err_msg = ("Volume : {%s} clone failed" % name)
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -712,7 +709,7 @@ class EMCCoprHDDriverCommon(object):
 {'volume_name': name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : %s expand failed") % name)
+log_err_msg = ("Volume : %s expand failed" % name)
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -735,8 +732,7 @@ class EMCCoprHDDriverCommon(object):
 {'volume_name': volume_name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : %s expand failed") %
-volume_name)
+log_err_msg = "Volume : %s expand failed" % volume_name
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -789,8 +785,7 @@ class EMCCoprHDDriverCommon(object):
 {'src_snapshot_name': src_snapshot_name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Snapshot : %s clone failed") %
-src_snapshot_name)
+log_err_msg = "Snapshot : %s clone failed" % src_snapshot_name
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -809,8 +804,7 @@ class EMCCoprHDDriverCommon(object):
 {'volume_name': new_volume_name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : %s expand failed") %
-new_volume_name)
+log_err_msg = "Volume : %s expand failed" % new_volume_name
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -825,16 +819,16 @@ class EMCCoprHDDriverCommon(object):
 self.volume_obj.delete(full_project_name, name, sync=True)
 except coprhd_utils.CoprHdError as e:
 if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR:
-LOG.info(_LI(
+LOG.info(
 "Volume %s"
 " no longer exists; volume deletion is"
-" considered successful."), name)
+" considered successful.", name)
 else:
 coprhd_err_msg = (_("Volume %(name)s: delete failed"
 "\n%(err)s") %
 {'name': name, 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : %s delete failed") % name)
+log_err_msg = "Volume : %s delete failed" % name
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -851,7 +845,7 @@ class EMCCoprHDDriverCommon(object):
 _("Snapshot can't be taken individually on a volume"
 " that is part of a Consistency Group"))
 except KeyError:
-LOG.info(_LI("No Consistency Group associated with the volume"))
+LOG.info("No Consistency Group associated with the volume")

 if self.configuration.coprhd_emulate_snapshot:
 self.create_cloned_volume(snapshot, volume, truncate_name)
@@ -899,7 +893,7 @@ class EMCCoprHDDriverCommon(object):
 "\n%(err)s") % {'snapshotname': snapshotname,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Snapshot : %s create failed") % snapshotname)
+log_err_msg = "Snapshot : %s create failed" % snapshotname
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -916,7 +910,7 @@ class EMCCoprHDDriverCommon(object):
 _("Snapshot delete can't be done individually on a volume"
 " that is part of a Consistency Group"))
 except KeyError:
-LOG.info(_LI("No Consistency Group associated with the volume"))
+LOG.info("No Consistency Group associated with the volume")

 if self.configuration.coprhd_emulate_snapshot:
 self.delete_volume(snapshot)
@@ -936,10 +930,10 @@ class EMCCoprHDDriverCommon(object):
 project=projectname,
 tenant=tenantname)
 if resource_uri is None:
-LOG.info(_LI(
+LOG.info(
 "Snapshot %s"
 " is not found; snapshot deletion"
-" is considered successful."), snapshotname)
+" is considered successful.", snapshotname)
 else:
 snapshotname = self._get_coprhd_snapshot_name(
 snapshot, resource_uri)
@@ -954,7 +948,7 @@ class EMCCoprHDDriverCommon(object):
 coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") %
 snapshotname)

-log_err_msg = (_LE("Snapshot : %s delete failed") % snapshotname)
+log_err_msg = "Snapshot : %s delete failed" % snapshotname
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -976,11 +970,11 @@ class EMCCoprHDDriverCommon(object):
 foundhostname = self._find_host(initiator_ports[i])

 if foundhostname:
-LOG.info(_LI("Found host %s"), foundhostname)
+LOG.info("Found host %s", foundhostname)
 break

 if not foundhostname:
-LOG.error(_LE("Auto host creation not supported"))
+LOG.error("Auto host creation not supported")
 # create an export group for this host
 foundgroupname = foundhostname + 'SG'
 # create a unique name
@@ -1056,9 +1050,9 @@ class EMCCoprHDDriverCommon(object):
 None,
 None)
 else:
-LOG.info(_LI(
+LOG.info(
 "No export group found for the host: %s"
-"; this is considered already detached."), hostname)
+"; this is considered already detached.", hostname)

 return itls

@@ -1133,11 +1127,11 @@ class EMCCoprHDDriverCommon(object):

 if itls is None:
 # No device number found after 10 tries; return an empty itl
-LOG.info(_LI(
+LOG.info(
 "No device number has been found after 10 tries; "
 "this likely indicates an unsuccessful attach of "
 "volume volumename=%(volumename)s to"
-" initiator initiator_ports=%(initiator_ports)s"),
+" initiator initiator_ports=%(initiator_ports)s",
 {'volumename': volumename,
 'initiator_ports': initiator_ports})

@@ -1408,7 +1402,7 @@ class EMCCoprHDDriverCommon(object):

 except coprhd_utils.CoprHdError:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Update volume stats failed"))
+LOG.exception("Update volume stats failed")

 @retry_wrapper
 def retype(self, ctxt, volume, new_type, diff, host):
@@ -1434,7 +1428,7 @@ class EMCCoprHDDriverCommon(object):
 "\n%(err)s") % {'volume_name': volume_name,
 'err': six.text_type(e.msg)})

-log_err_msg = (_LE("Volume : %s type update failed") %
+log_err_msg = ("Volume : %s type update failed" %
 volume_name)
 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
 log_err_msg)

@@ -24,7 +24,6 @@ from six.moves import urllib

 from cinder import exception
 from cinder.i18n import _
-from cinder.i18n import _LI
 from cinder import interface
 from cinder.volume import driver
 from cinder.volume.drivers.coprhd import common as coprhd_common
@@ -266,7 +265,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
 request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" %
 (server_ip, six.text_type(server_port), ip_double_encoded))

-LOG.info(_LI("ScaleIO get client id by ip request: %s"), request)
+LOG.info("ScaleIO get client id by ip request: %s", request)

 if self.configuration.scaleio_verify_server_certificate:
 verify_cert = self.configuration.scaleio_server_certificate_path
@@ -292,7 +291,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
 'message']})
 LOG.error(msg)
 raise exception.VolumeBackendAPIException(data=msg)
-LOG.info(_LI("ScaleIO sdc id is %s"), sdc_id)
+LOG.info("ScaleIO sdc id is %s", sdc_id)
 return sdc_id

 def _check_response(self, response, request,
@@ -300,7 +299,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
 server_username, server_password):
 if response.status_code == 401 or response.status_code == 403:
 LOG.info(
-_LI("Token is invalid, going to re-login and get a new one"))
+"Token is invalid, going to re-login and get a new one")

 login_request = ("https://%s:%s/api/login" %
 (server_ip, six.text_type(server_port)))
@@ -317,7 +316,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
 token = r.json()
 self.server_token = token
 # repeat request with valid token
-LOG.info(_LI("Going to perform request again %s with valid token"),
+LOG.info("Going to perform request again %s with valid token",
 request)
 res = requests.get(
 request, auth=(server_username, self.server_token),

@@ -24,8 +24,8 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_utils import units

-from cinder.i18n import _, _LI, _LW, _LE
 from cinder import exception
+from cinder.i18n import _
 from cinder.volume import utils as volutils

 import cinder.volume.drivers.datera.datera_common as datc
@@ -98,8 +98,8 @@ class DateraApi(object):
 policies = self._get_policies_for_resource(volume)
 template = policies['template']
 if template:
-LOG.warning(_LW("Volume size not extended due to template binding:"
-" volume: %(volume)s, template: %(template)s"),
+LOG.warning("Volume size not extended due to template binding:"
+" volume: %(volume)s, template: %(template)s",
 volume=volume, template=template)
 return

@@ -164,9 +164,9 @@ class DateraApi(object):
 method='delete',
 api_version='2')
 except exception.NotFound:
-msg = _LI("Tried to delete volume %s, but it was not found in the "
-"Datera cluster. Continuing with delete.")
-LOG.info(msg, datc._get_name(volume['id']))
+LOG.info("Tried to delete volume %s, but it was not found in the "
+"Datera cluster. Continuing with delete.",
+datc._get_name(volume['id']))

 # =================
 # = Ensure Export =
@@ -341,8 +341,8 @@ class DateraApi(object):
 self._issue_api_request(url, method='put', body=data,
 api_version='2')
 except exception.NotFound:
-msg = _LI("Tried to detach volume %s, but it was not found in the "
+msg = ("Tried to detach volume %s, but it was not found in the "
 "Datera cluster. Continuing with detach.")
 LOG.info(msg, volume['id'])
 # TODO(_alastor_): Make acl cleaning multi-attach aware
 self._clean_acl_2(volume)
@@ -436,8 +436,8 @@ class DateraApi(object):
 else:
 raise exception.NotFound
 except exception.NotFound:
-msg = _LI("Tried to delete snapshot %s, but was not found in "
+msg = ("Tried to delete snapshot %s, but was not found in "
 "Datera cluster. Continuing with delete.")
 LOG.info(msg, datc._get_name(snapshot['id']))

 # ========================
@@ -610,8 +610,8 @@ class DateraApi(object):
 results = self._issue_api_request('system', api_version='2')

 if 'uuid' not in results:
-LOG.error(_LE(
-'Failed to get updated stats from Datera Cluster.'))
+LOG.error(
+'Failed to get updated stats from Datera Cluster.')

 backend_name = self.configuration.safe_get(
 'volume_backend_name')
@@ -629,8 +629,7 @@ class DateraApi(object):

 self.cluster_stats = stats
 except exception.DateraAPIException:
-LOG.error(_LE('Failed to get updated stats from Datera '
-'cluster.'))
+LOG.error('Failed to get updated stats from Datera cluster.')
 return self.cluster_stats

 def _is_manageable(self, app_inst):
@@ -662,10 +661,10 @@ class DateraApi(object):
 self.datera_api_token = results['key']
 except exception.NotAuthorized:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Logging into the Datera cluster failed. Please '
+LOG.error('Logging into the Datera cluster failed. Please '
 'check your username and password set in the '
 'cinder.conf and start the cinder-volume '
-'service again.'))
+'service again.')

 # ===========
 # = Polling =

@@ -23,8 +23,8 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_utils import units

-from cinder.i18n import _, _LI, _LW, _LE
 from cinder import exception
+from cinder.i18n import _
 from cinder.volume import utils as volutils

 import cinder.volume.drivers.datera.datera_common as datc
@@ -104,8 +104,8 @@ class DateraApi(object):
 policies = self._get_policies_for_resource(volume)
 template = policies['template']
 if template:
-LOG.warning(_LW("Volume size not extended due to template binding:"
-" volume: %(volume)s, template: %(template)s"),
+LOG.warning("Volume size not extended due to template binding:"
+" volume: %(volume)s, template: %(template)s",
 volume=volume, template=template)
 return

@@ -184,8 +184,8 @@ class DateraApi(object):
 api_version='2.1',
 tenant=tenant)
 except exception.NotFound:
-msg = _LI("Tried to delete volume %s, but it was not found in the "
+msg = ("Tried to delete volume %s, but it was not found in the "
 "Datera cluster. Continuing with delete.")
 LOG.info(msg, datc._get_name(volume['id']))

 # =================
@@ -378,8 +378,8 @@ class DateraApi(object):
 self._issue_api_request(url, method='put', body=data,
 api_version='2.1', tenant=tenant)
 except exception.NotFound:
-msg = _LI("Tried to detach volume %s, but it was not found in the "
+msg = ("Tried to detach volume %s, but it was not found in the "
 "Datera cluster. Continuing with detach.")
 LOG.info(msg, volume['id'])
 # TODO(_alastor_): Make acl cleaning multi-attach aware
 self._clean_acl_2_1(volume, tenant)
@@ -481,8 +481,8 @@ class DateraApi(object):
 else:
 raise exception.NotFound
 except exception.NotFound:
-msg = _LI("Tried to delete snapshot %s, but was not found in "
+msg = ("Tried to delete snapshot %s, but was not found in "
 "Datera cluster. Continuing with delete.")
 LOG.info(msg, datc._get_name(snapshot['id']))

 # ========================
@@ -772,10 +772,10 @@ class DateraApi(object):
 self.datera_api_token = results['key']
 except exception.NotAuthorized:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Logging into the Datera cluster failed. Please '
+LOG.error('Logging into the Datera cluster failed. Please '
 'check your username and password set in the '
 'cinder.conf and start the cinder-volume '
-'service again.'))
+'service again.')

 # ===========
 # = Polling =
@@ -834,8 +834,8 @@ class DateraApi(object):
 'system', api_version='2.1')['data']

 if 'uuid' not in results:
-LOG.error(_LE(
-'Failed to get updated stats from Datera Cluster.'))
+LOG.error(
+'Failed to get updated stats from Datera Cluster.')

 backend_name = self.configuration.safe_get(
 'volume_backend_name')
@@ -854,8 +854,7 @@ class DateraApi(object):

 self.cluster_stats = stats
 except exception.DateraAPIException:
-LOG.error(_LE('Failed to get updated stats from Datera '
-'cluster.'))
+LOG.error('Failed to get updated stats from Datera cluster.')
 return self.cluster_stats

 # =======

@@ -21,7 +21,7 @@ import time
 from oslo_log import log as logging

 from cinder import exception
-from cinder.i18n import _, _LI, _LE
+from cinder.i18n import _


 LOG = logging.getLogger(__name__)
@@ -156,7 +156,7 @@ def _api_lookup(func):
 name = "_" + "_".join(
 (func.__name__, api_version.replace(".", "_")))
 try:
-LOG.info(_LI("Trying method: %s"), name)
+LOG.info("Trying method: %s", name)
 return getattr(obj, name)(*args[1:], **kwargs)
 except AttributeError as e:
 # If we find the attribute name in the error message
@@ -206,6 +206,6 @@ def _get_supported_api_versions(driver):
 str(resp.json().get("code")) == "99"):
 results.append(version)
 else:
-LOG.error(_LE("No supported API versions available, "
-"Please upgrade your Datera EDF software"))
+LOG.error("No supported API versions available, "
+"Please upgrade your Datera EDF software")
 return results

@ -25,7 +25,7 @@ import six
|
|||||||
import uuid
|
import uuid
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI, _LW
|
from cinder.i18n import _
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -152,15 +152,15 @@ class HttpClient(object):
|
|||||||
url = url + id
|
url = url + id
|
||||||
else:
|
else:
|
||||||
# No hope.
|
# No hope.
|
||||||
LOG.error(_LE('_get_async_url: Bogus return async task %r'),
|
LOG.error('_get_async_url: Bogus return async task %r',
|
||||||
asyncTask)
|
asyncTask)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
message=_('_get_async_url: Invalid URL.'))
|
message=_('_get_async_url: Invalid URL.'))
|
||||||
|
|
||||||
# Check for an odd error case
|
# Check for an odd error case
|
||||||
if url.startswith('<') and url.endswith('>'):
|
if url.startswith('<') and url.endswith('>'):
|
||||||
LOG.error(_LE('_get_async_url: Malformed URL '
|
LOG.error('_get_async_url: Malformed URL (XML returned). (%r)',
|
||||||
'(XML returned). (%r)'), asyncTask)
|
asyncTask)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
message=_('_get_async_url: Malformed URL.'))
|
message=_('_get_async_url: Malformed URL.'))
|
||||||
|
|
||||||
@ -308,8 +308,8 @@ class StorageCenterApiHelper(object):
|
|||||||
self.san_login = self.config.secondary_san_login
|
self.san_login = self.config.secondary_san_login
|
||||||
self.san_password = self.config.secondary_san_password
|
self.san_password = self.config.secondary_san_password
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI('Swapping DSM credentials: Secondary DSM '
|
LOG.info('Swapping DSM credentials: Secondary DSM '
|
||||||
'credentials are not set or are incomplete.'))
|
'credentials are not set or are incomplete.')
|
||||||
# Cannot swap.
|
# Cannot swap.
|
||||||
return False
|
return False
|
||||||
# Odds on this hasn't changed so no need to make setting this a
|
# Odds on this hasn't changed so no need to make setting this a
|
||||||
@ -322,7 +322,7 @@ class StorageCenterApiHelper(object):
|
|||||||
self.san_login = self.config.san_login
|
self.san_login = self.config.san_login
|
||||||
self.san_password = self.config.san_password
|
self.san_password = self.config.san_password
|
||||||
self.san_port = self.config.dell_sc_api_port
|
self.san_port = self.config.dell_sc_api_port
|
||||||
LOG.info(_LI('Swapping DSM credentials: New DSM IP is %r.'),
|
LOG.info('Swapping DSM credentials: New DSM IP is %r.',
|
||||||
self.san_ip)
|
self.san_ip)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@ -363,7 +363,7 @@ class StorageCenterApiHelper(object):
|
|||||||
:raises: VolumeBackendAPIException
|
:raises: VolumeBackendAPIException
|
||||||
"""
|
"""
|
||||||
connection = None
|
connection = None
|
||||||
LOG.info(_LI('open_connection to %(ssn)s at %(ip)s'),
|
LOG.info('open_connection to %(ssn)s at %(ip)s',
|
||||||
{'ssn': self.primaryssn,
|
{'ssn': self.primaryssn,
|
||||||
'ip': self.config.san_ip})
|
'ip': self.config.san_ip})
|
||||||
if self.primaryssn:
|
if self.primaryssn:
|
||||||
@ -376,11 +376,11 @@ class StorageCenterApiHelper(object):
|
|||||||
connection = self._setup_connection()
|
connection = self._setup_connection()
|
||||||
else:
|
else:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to connect to the API. '
|
LOG.error('Failed to connect to the API. '
|
||||||
'No backup DSM provided.'))
|
'No backup DSM provided.')
|
||||||
# Save our api version for next time.
|
# Save our api version for next time.
|
||||||
if self.apiversion != connection.apiversion:
|
if self.apiversion != connection.apiversion:
|
||||||
LOG.info(_LI('open_connection: Updating API version to %s'),
|
LOG.info('open_connection: Updating API version to %s',
|
||||||
connection.apiversion)
|
connection.apiversion)
|
||||||
self.apiversion = connection.apiversion
|
self.apiversion = connection.apiversion
|
||||||
|
|
||||||
@ -488,7 +488,7 @@ class StorageCenterApi(object):
|
|||||||
'reason': rest_response.reason,
|
'reason': rest_response.reason,
|
||||||
'text': response_text})
|
'text': response_text})
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW('Failed to get REST call result.'))
|
LOG.warning('Failed to get REST call result.')
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -563,12 +563,11 @@ class StorageCenterApi(object):
 try:
 return blob.json()
 except AttributeError:
-LOG.error(_LE('Error invalid json: %s'),
-blob)
+LOG.error('Error invalid json: %s', blob)
 except TypeError as ex:
-LOG.error(_LE('Error TypeError. %s'), ex)
+LOG.error('Error TypeError. %s', ex)
 except scanner.JSONDecodeError as ex:
-LOG.error(_LE('Error JSONDecodeError. %s'), ex)
+LOG.error('Error JSONDecodeError. %s', ex)
 # We are here so this went poorly. Log our blob.
 LOG.debug('_get_json blob %s', blob)
 return None
@@ -583,12 +582,11 @@ class StorageCenterApi(object):
 if isinstance(blob, dict):
 return blob.get('instanceId')
 except AttributeError:
-LOG.error(_LE('Invalid API object: %s'),
-blob)
+LOG.error('Invalid API object: %s', blob)
 except TypeError as ex:
-LOG.error(_LE('Error TypeError. %s'), ex)
+LOG.error('Error TypeError. %s', ex)
 except scanner.JSONDecodeError as ex:
-LOG.error(_LE('Error JSONDecodeError. %s'), ex)
+LOG.error('Error JSONDecodeError. %s', ex)
 LOG.debug('_get_id failed: blob %s', blob)
 return None

@@ -617,7 +615,7 @@ class StorageCenterApi(object):
 except Exception:
 # We don't care what failed. The clues are already in the logs.
 # Just log a parsing error and move on.
-LOG.error(_LE('_check_version_fail: Parsing error.'))
+LOG.error('_check_version_fail: Parsing error.')
 # Just eat this if it isn't a version error.
 return response

@@ -662,7 +660,7 @@ class StorageCenterApi(object):
 except Exception:
 # Good return but not the login response we were expecting.
 # Log it and error out.
-LOG.error(_LE('Unrecognized Login Response: %s'), r)
+LOG.error('Unrecognized Login Response: %s', r)

 def close_connection(self):
 """Logout of Dell REST API."""
@@ -691,7 +689,7 @@ class StorageCenterApi(object):
 '%(pid)r not valid on %(ssn)r',
 {'pid': provider_id, 'ssn': self.ssn})
 except Exception:
-LOG.error(_LE('_use_provider_id: provider_id %s is invalid!'),
+LOG.error('_use_provider_id: provider_id %s is invalid!',
 provider_id)
 return ret

@@ -708,7 +706,7 @@ class StorageCenterApi(object):
 r = self.client.get('StorageCenter/StorageCenter')
 result = self._get_result(r, 'scSerialNumber', ssn)
 if result is None:
-LOG.error(_LE('Failed to find %(s)s. Result %(r)s'),
+LOG.error('Failed to find %(s)s. Result %(r)s',
 {'s': ssn,
 'r': r})
 raise exception.VolumeBackendAPIException(
@@ -779,7 +777,7 @@ class StorageCenterApi(object):
 scfolder = self._create_folder(url, instanceId, folder, ssn)
 # If we haven't found a folder or created it then leave
 if scfolder is None:
-LOG.error(_LE('Unable to create folder path %s'), folderpath)
+LOG.error('Unable to create folder path %s', folderpath)
 break
 # Next part of the path will need this
 instanceId = self._get_id(scfolder)
@@ -878,9 +876,9 @@ class StorageCenterApi(object):
 # has likely been attempted before the volume has been instantiated
 # on the Storage Center. In the real world no one will snapshot
 # a volume without first putting some data in that volume.
-LOG.warning(_LW('Volume %(name)s initialization failure. '
+LOG.warning('Volume %(name)s initialization failure. '
 'Operations such as snapshot and clone may fail due '
-'to inactive volume.)'), {'name': scvolume['name']})
+'to inactive volume.)', {'name': scvolume['name']})

 def _find_storage_profile(self, storage_profile):
 """Looks for a Storage Profile on the array.
@@ -1066,7 +1064,7 @@ class StorageCenterApi(object):

 # If we actually have a place to put our volume create it
 if folder is None:
-LOG.warning(_LW('Unable to create folder %s'), self.vfname)
+LOG.warning('Unable to create folder %s', self.vfname)

 # Find our replay_profiles.
 addids, removeids = self._find_replay_profiles(replay_profile_string)
@@ -1108,17 +1106,17 @@ class StorageCenterApi(object):
 # Our volume should be in the return.
 scvolume = self._get_json(r)
 if scvolume:
-LOG.info(_LI('Created volume %(instanceId)s: %(name)s'),
+LOG.info('Created volume %(instanceId)s: %(name)s',
 {'instanceId': scvolume['instanceId'],
 'name': scvolume['name']})
 else:
-LOG.error(_LE('ScVolume returned success with empty payload.'
+LOG.error('ScVolume returned success with empty payload.'
-' Attempting to locate volume'))
+' Attempting to locate volume')
 # In theory it is there since success was returned.
 # Try one last time to find it before returning.
 scvolume = self._search_for_volume(name)
 else:
-LOG.error(_LE('Unable to create volume on SC: %s'), name)
+LOG.error('Unable to create volume on SC: %s', name)

 return scvolume

@@ -1170,8 +1168,7 @@ class StorageCenterApi(object):
 # if there is no live volume then we return our provider_id.
 primary_id = provider_id
 lv = self.get_live_volume(provider_id, name)
-LOG.info(_LI('Volume %(name)r, '
-'id %(provider)s at primary %(primary)s.'),
+LOG.info('Volume %(name)r, id %(provider)s at primary %(primary)s.',
 {'name': name,
 'provider': provider_id,
 'primary': primary_id})
@@ -1180,7 +1177,7 @@ class StorageCenterApi(object):
 if lv and (self.is_swapped(provider_id, lv) and not self.failed_over
 and self._autofailback(lv)):
 lv = self.get_live_volume(provider_id)
-LOG.info(_LI('After failback %s'), lv)
+LOG.info('After failback %s', lv)
 # Make sure we still have a LV.
 if lv:
 # At this point if the secondaryRole is Active we have
@@ -1226,7 +1223,7 @@ class StorageCenterApi(object):
 msg = (_('Unable to complete failover of %s.')
 % name)
 raise exception.VolumeBackendAPIException(data=msg)
-LOG.info(_LI('Imported %(fail)s to %(guid)s.'),
+LOG.info('Imported %(fail)s to %(guid)s.',
 {'fail': self._repl_name(name),
 'guid': name})
 else:
@@ -1313,8 +1310,8 @@ class StorageCenterApi(object):
 return self._get_json(r)

 # If we can't find the volume then it is effectively gone.
-LOG.warning(_LW('delete_volume: unable to find volume '
+LOG.warning('delete_volume: unable to find volume '
-'provider_id: %s'), provider_id)
+'provider_id: %s', provider_id)
 return True

 def _find_server_folder(self, create=False, ssn=-1):
@@ -1354,7 +1351,7 @@ class StorageCenterApi(object):
 r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba'
 % self._get_id(scserver), payload, True)
 if not self._check_result(r):
-LOG.error(_LE('_add_hba error: %(wwn)s to %(srvname)s'),
+LOG.error('_add_hba error: %(wwn)s to %(srvname)s',
 {'wwn': wwnoriscsiname,
 'srvname': scserver['name']})
 return False
@@ -1385,7 +1382,7 @@ class StorageCenterApi(object):
 # Found it return the id
 return self._get_id(srvos)

-LOG.warning(_LW('Unable to find appropriate OS %s'), osname)
+LOG.warning('Unable to find appropriate OS %s', osname)

 return None

@@ -1412,7 +1409,7 @@ class StorageCenterApi(object):
 for wwn in wwnlist:
 if not self._add_hba(scserver, wwn):
 # We failed so log it. Delete our server and return None.
-LOG.error(_LE('Error adding HBA %s to server'), wwn)
+LOG.error('Error adding HBA %s to server', wwn)
 self._delete_server(scserver)
 return None
 return scserver
@@ -1420,7 +1417,7 @@ class StorageCenterApi(object):
 def _create_server(self, servername, folder, serveros, ssn):
 ssn = self._vet_ssn(ssn)

-LOG.info(_LI('Creating server %s'), servername)
+LOG.info('Creating server %s', servername)
 payload = {}
 payload['Name'] = servername
 payload['StorageCenter'] = ssn
@@ -1445,9 +1442,9 @@ class StorageCenterApi(object):
 if self._check_result(r):
 # Server was created
 scserver = self._first_result(r)
-LOG.info(_LI('SC server created %s'), scserver)
+LOG.info('SC server created %s', scserver)
 return scserver
-LOG.error(_LE('Unable to create SC server %s'), servername)
+LOG.error('Unable to create SC server %s', servername)
 return None

 def _vet_ssn(self, ssn):
@@ -1529,7 +1526,7 @@ class StorageCenterApi(object):
 domains = self._get_json(r)
 return domains

-LOG.error(_LE('Error getting FaultDomainList for %s'), cportid)
+LOG.error('Error getting FaultDomainList for %s', cportid)
 return None

 def _find_initiators(self, scserver):
@@ -1549,7 +1546,7 @@ class StorageCenterApi(object):
 wwn is not None):
 initiators.append(wwn)
 else:
-LOG.error(_LE('Unable to find initiators'))
+LOG.error('Unable to find initiators')
 LOG.debug('_find_initiators: %s', initiators)
 return initiators

@@ -1580,8 +1577,8 @@ class StorageCenterApi(object):
 if self._check_result(r):
 mappings = self._get_json(r)
 else:
-LOG.error(_LE('_find_mappings: volume is not active'))
+LOG.error('_find_mappings: volume is not active')
-LOG.info(_LI('Volume mappings for %(name)s: %(mappings)s'),
+LOG.info('Volume mappings for %(name)s: %(mappings)s',
 {'name': scvolume.get('name'),
 'mappings': mappings})
 return mappings
@@ -1598,7 +1595,7 @@ class StorageCenterApi(object):
 if self._check_result(r):
 mapping_profiles = self._get_json(r)
 else:
-LOG.error(_LE('Unable to find mapping profiles: %s'),
+LOG.error('Unable to find mapping profiles: %s',
 scvolume.get('name'))
 LOG.debug(mapping_profiles)
 return mapping_profiles
@@ -1655,17 +1652,17 @@ class StorageCenterApi(object):
 if lun is None:
 lun = mappinglun
 elif lun != mappinglun:
-LOG.warning(_LW('Inconsistent Luns.'))
+LOG.warning('Inconsistent Luns.')
 else:
 LOG.debug('%s not found in initiator list',
 hbaname)
 else:
-LOG.warning(_LW('_find_wwn: serverhba is None.'))
+LOG.warning('_find_wwn: serverhba is None.')
 else:
-LOG.warning(_LW('_find_wwn: Unable to find port wwn.'))
+LOG.warning('_find_wwn: Unable to find port wwn.')
 else:
-LOG.warning(_LW('_find_wwn: controllerport is None.'))
+LOG.warning('_find_wwn: controllerport is None.')
-LOG.info(_LI('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s'),
+LOG.info('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s',
 {'lun': lun,
 'wwn': wwns,
 'map': itmap})
@@ -1686,7 +1683,7 @@ class StorageCenterApi(object):
 controller = volconfig.get('controller')
 actvctrl = self._get_id(controller)
 else:
-LOG.error(_LE('Unable to retrieve VolumeConfiguration: %s'),
+LOG.error('Unable to retrieve VolumeConfiguration: %s',
 self._get_id(scvolume))
 LOG.debug('_find_active_controller: %s', actvctrl)
 return actvctrl
@@ -1731,8 +1728,8 @@ class StorageCenterApi(object):
 if self._check_result(r):
 controllerport = self._first_result(r)
 else:
-LOG.error(_LE('_find_controller_port_iscsi_config: '
+LOG.error('_find_controller_port_iscsi_config: '
-'Error finding configuration: %s'), cportid)
+'Error finding configuration: %s', cportid)
 return controllerport

 def find_iscsi_properties(self, scvolume):
@@ -1904,7 +1901,7 @@ class StorageCenterApi(object):
 mprofiles = self._find_mapping_profiles(scvolume)
 for mprofile in mprofiles:
 if self._get_id(mprofile.get('server')) == serverid:
-LOG.info(_LI('Volume %(vol)s already mapped to %(srv)s'),
+LOG.info('Volume %(vol)s already mapped to %(srv)s',
 {'vol': scvolume['name'],
 'srv': scserver['name']})
 return mprofile
@@ -1916,13 +1913,13 @@ class StorageCenterApi(object):
 % volumeid, payload, True)
 if self._check_result(r):
 # We just return our mapping
-LOG.info(_LI('Volume %(vol)s mapped to %(srv)s'),
+LOG.info('Volume %(vol)s mapped to %(srv)s',
 {'vol': scvolume['name'],
 'srv': scserver['name']})
 return self._first_result(r)

 # Error out
-LOG.error(_LE('Unable to map %(vol)s to %(srv)s'),
+LOG.error('Unable to map %(vol)s to %(srv)s',
 {'vol': scvolume['name'],
 'srv': scserver['name']})
 return None
@@ -1956,12 +1953,12 @@ class StorageCenterApi(object):
 if result is True or (type(result) is dict and
 result.get('result')):
 LOG.info(
-_LI('Volume %(vol)s unmapped from %(srv)s'),
+'Volume %(vol)s unmapped from %(srv)s',
 {'vol': scvolume['name'],
 'srv': scserver['name']})
 continue

-LOG.error(_LE('Unable to unmap %(vol)s from %(srv)s'),
+LOG.error('Unable to unmap %(vol)s from %(srv)s',
 {'vol': scvolume['name'],
 'srv': scserver['name']})
 # 1 failed unmap is as good as 100.
@@ -2018,7 +2015,7 @@ class StorageCenterApi(object):

 # Quick double check.
 if replay is None:
-LOG.warning(_LW('Unable to create snapshot %s'), replayid)
+LOG.warning('Unable to create snapshot %s', replayid)
 # Return replay or None.
 return replay

@@ -2052,10 +2049,10 @@ class StorageCenterApi(object):
 # We found our replay so return it.
 return replay
 except Exception:
-LOG.error(_LE('Invalid ReplayList return: %s'),
+LOG.error('Invalid ReplayList return: %s',
 r)
 # If we are here then we didn't find the replay so warn and leave.
-LOG.warning(_LW('Unable to find snapshot %s'),
+LOG.warning('Unable to find snapshot %s',
 replayid)

 return None
@@ -2075,7 +2072,7 @@ class StorageCenterApi(object):
 self._get_id(screplay), payload, True)
 if self._check_result(r):
 return True
-LOG.error(_LE('Error managing replay %s'),
+LOG.error('Error managing replay %s',
 screplay.get('description'))
 return False

@@ -2092,7 +2089,7 @@ class StorageCenterApi(object):
 self._get_id(screplay), payload, True)
 if self._check_result(r):
 return True
-LOG.error(_LE('Error unmanaging replay %s'),
+LOG.error('Error unmanaging replay %s',
 screplay.get('description'))
 return False

@@ -2162,12 +2159,11 @@ class StorageCenterApi(object):
 # If we have a dr_profile to apply we should do so now.
 if dr_profile and not self.update_datareduction_profile(volume,
 dr_profile):
-LOG.error(_LE('Unable to apply %s to volume.'), dr_profile)
+LOG.error('Unable to apply %s to volume.', dr_profile)
 volume = None

 if volume is None:
-LOG.error(_LE('Unable to create volume %s from replay'),
-volname)
+LOG.error('Unable to create volume %s from replay', volname)

 return volume

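With the wrapper gone, several two-line calls above collapse onto a single line (for example the 'Unable to create volume %s from replay' message), but the arguments are still passed separately instead of being pre-formatted into the string. A small stdlib-logging sketch of why that matters; the Expensive class is a made-up stand-in, not something from the driver:

    import logging

    logging.basicConfig(level=logging.ERROR)
    LOG = logging.getLogger(__name__)

    class Expensive:
        def __str__(self):
            print('formatting happened')
            return 'expensive'

    # Deferred interpolation: __str__ never runs because INFO records are filtered out.
    LOG.info('value: %s', Expensive())

    # Eager interpolation ('value: %s' % Expensive()) would pay the formatting cost
    # even for records that are ultimately suppressed.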
@@ -2230,7 +2226,7 @@ class StorageCenterApi(object):
 :returns: The new volume's Dell volume object.
 :raises: VolumeBackendAPIException if error doing copy.
 """
-LOG.info(_LI('create_cloned_volume: Creating %(dst)s from %(src)s'),
+LOG.info('create_cloned_volume: Creating %(dst)s from %(src)s',
 {'dst': volumename,
 'src': scvolume['name']})

@@ -2273,7 +2269,7 @@ class StorageCenterApi(object):
 self.delete_volume(volumename, self._get_id(newvol))
 raise
 # Tell the user.
-LOG.error(_LE('create_cloned_volume: Unable to clone volume'))
+LOG.error('create_cloned_volume: Unable to clone volume')
 return None

 def expand_volume(self, scvolume, newsize):
@@ -2296,7 +2292,7 @@ class StorageCenterApi(object):
 {'name': vol['name'],
 'size': vol['configuredSize']})
 else:
-LOG.error(_LE('Error expanding volume %s.'), scvolume['name'])
+LOG.error('Error expanding volume %s.', scvolume['name'])
 return vol

 def rename_volume(self, scvolume, name):
@@ -2316,7 +2312,7 @@ class StorageCenterApi(object):
 if self._check_result(r):
 return True

-LOG.error(_LE('Error renaming volume %(original)s to %(name)s'),
+LOG.error('Error renaming volume %(original)s to %(name)s',
 {'original': scvolume['name'],
 'name': name})
 return False
@@ -2329,13 +2325,13 @@ class StorageCenterApi(object):
 return False

 if not prefs.get(allowprefname):
-LOG.error(_LE('User does not have permission to change '
+LOG.error('User does not have permission to change '
-'%s selection.'), profiletype)
+'%s selection.', profiletype)
 return False

 if profilename:
 if not profile:
-LOG.error(_LE('%(ptype)s %(pname)s was not found.'),
+LOG.error('%(ptype)s %(pname)s was not found.',
 {'ptype': profiletype,
 'pname': profilename})
 return False
@@ -2343,10 +2339,10 @@ class StorageCenterApi(object):
 # Going from specific profile to the user default
 profile = prefs.get(restname)
 if not profile and not continuewithoutdefault:
-LOG.error(_LE('Default %s was not found.'), profiletype)
+LOG.error('Default %s was not found.', profiletype)
 return False

-LOG.info(_LI('Switching volume %(vol)s to profile %(prof)s.'),
+LOG.info('Switching volume %(vol)s to profile %(prof)s.',
 {'vol': scvolume['name'],
 'prof': profile.get('name')})
 payload = {}
@@ -2356,8 +2352,8 @@ class StorageCenterApi(object):
 if self._check_result(r):
 return True

-LOG.error(_LE('Error changing %(ptype)s for volume '
+LOG.error('Error changing %(ptype)s for volume '
-'%(original)s to %(name)s'),
+'%(original)s to %(name)s',
 {'ptype': profiletype,
 'original': scvolume['name'],
 'name': profilename})
@@ -2467,7 +2463,7 @@ class StorageCenterApi(object):
 profilelist = self._get_json(r)
 if profilelist:
 if len(profilelist) > 1:
-LOG.error(_LE('Multiple replay profiles under name %s'),
+LOG.error('Multiple replay profiles under name %s',
 name)
 raise exception.VolumeBackendAPIException(
 data=_('Multiple profiles found.'))
@@ -2507,12 +2503,12 @@ class StorageCenterApi(object):
 r = self.client.delete('StorageCenter/ScReplayProfile/%s' %
 self._get_id(profile), async=True)
 if self._check_result(r):
-LOG.info(_LI('Profile %s has been deleted.'),
+LOG.info('Profile %s has been deleted.',
 profile.get('name'))
 else:
 # We failed due to a failure to delete an existing profile.
 # This is reason to raise an exception.
-LOG.error(_LE('Unable to delete profile %s.'), profile.get('name'))
+LOG.error('Unable to delete profile %s.', profile.get('name'))
 raise exception.VolumeBackendAPIException(
 data=_('Error deleting replay profile.'))

@@ -2580,9 +2576,9 @@ class StorageCenterApi(object):
 if (self._update_volume_profiles(scvolume,
 addid=profileid,
 removeid=None)):
-LOG.info(_LI('Added %s to cg.'), vol['id'])
+LOG.info('Added %s to cg.', vol['id'])
 else:
-LOG.error(_LE('Failed to add %s to cg.'), vol['id'])
+LOG.error('Failed to add %s to cg.', vol['id'])
 return False
 return True

@@ -2599,9 +2595,9 @@ class StorageCenterApi(object):
 if (self._update_volume_profiles(scvolume,
 addid=None,
 removeid=profileid)):
-LOG.info(_LI('Removed %s from cg.'), vol['id'])
+LOG.info('Removed %s from cg.', vol['id'])
 else:
-LOG.error(_LE('Failed to remove %s from cg.'), vol['id'])
+LOG.error('Failed to remove %s from cg.', vol['id'])
 return False
 return True

@@ -2622,10 +2618,10 @@ class StorageCenterApi(object):
 ret = True
 profileid = self._get_id(profile)
 if add_volumes:
-LOG.info(_LI('Adding volumes to cg %s.'), profile['name'])
+LOG.info('Adding volumes to cg %s.', profile['name'])
 ret = self._add_cg_volumes(profileid, add_volumes)
 if ret and remove_volumes:
-LOG.info(_LI('Removing volumes from cg %s.'), profile['name'])
+LOG.info('Removing volumes from cg %s.', profile['name'])
 ret = self._remove_cg_volumes(profileid, remove_volumes)
 return ret

@@ -2666,7 +2662,7 @@ class StorageCenterApi(object):
 'CreateReplay'
 % self._get_id(profile), payload, True)
 if self._check_result(r):
-LOG.info(_LI('CreateReplay success %s'), replayid)
+LOG.info('CreateReplay success %s', replayid)
 return True

 return False
@@ -2716,7 +2712,7 @@ class StorageCenterApi(object):

 replays = self._get_json(r)
 else:
-LOG.error(_LE('Unable to locate snapshot %s'), replayid)
+LOG.error('Unable to locate snapshot %s', replayid)

 return replays

@@ -2780,7 +2776,7 @@ class StorageCenterApi(object):

 # If we actually have a place to put our volume create it
 if folder is None:
-LOG.warning(_LW('Unable to create folder %s'), self.vfname)
+LOG.warning('Unable to create folder %s', self.vfname)

 # Rename and move our volume.
 payload = {}
@@ -2882,7 +2878,7 @@ class StorageCenterApi(object):
 r = self.client.put('StorageCenter/ScVolume/%s' %
 self._get_id(scvolume), payload, True)
 if self._check_result(r):
-LOG.info(_LI('Volume %s unmanaged.'), scvolume['name'])
+LOG.info('Volume %s unmanaged.', scvolume['name'])
 else:
 msg = _('Unable to rename volume %(existing)s to %(newname)s') % {
 'existing': scvolume['name'],
@@ -2917,7 +2913,7 @@ class StorageCenterApi(object):
 if self._check_result(r):
 return self._get_json(r)

-LOG.error(_LE('Unable to find or create QoS Node named %s'), qosnode)
+LOG.error('Unable to find or create QoS Node named %s', qosnode)
 raise exception.VolumeBackendAPIException(
 data=_('Failed to find QoSnode'))

@@ -2961,7 +2957,7 @@ class StorageCenterApi(object):
 if replication.get('destinationScSerialNumber') == destssn:
 return replication
 # Unable to locate replication.
-LOG.warning(_LW('Unable to locate replication %(vol)s to %(ssn)s'),
+LOG.warning('Unable to locate replication %(vol)s to %(ssn)s',
 {'vol': scvolume.get('name'),
 'ssn': destssn})
 return None
@@ -2985,13 +2981,13 @@ class StorageCenterApi(object):
 async=True)
 if self._check_result(r):
 # check that we whacked the dest volume
-LOG.info(_LI('Replication %(vol)s to %(dest)s.'),
+LOG.info('Replication %(vol)s to %(dest)s.',
 {'vol': scvolume.get('name'),
 'dest': destssn})

 return True
-LOG.error(_LE('Unable to delete replication for '
+LOG.error('Unable to delete replication for '
-'%(vol)s to %(dest)s.'),
+'%(vol)s to %(dest)s.',
 {'vol': scvolume.get('name'),
 'dest': destssn})
 return False
@@ -3014,8 +3010,8 @@ class StorageCenterApi(object):
 diskfolder = self._get_json(r)[0]
 except Exception:
 # We just log this as an error and return nothing.
-LOG.error(_LE('Unable to find '
+LOG.error('Unable to find '
-'disk folder %(name)s on %(ssn)s'),
+'disk folder %(name)s on %(ssn)s',
 {'name': foldername,
 'ssn': ssn})
 return diskfolder
@@ -3061,7 +3057,7 @@ class StorageCenterApi(object):
 r = self.client.post('StorageCenter/ScReplication', payload, True)
 # 201 expected.
 if self._check_result(r):
-LOG.info(_LI('Replication created for %(volname)s to %(destsc)s'),
+LOG.info('Replication created for %(volname)s to %(destsc)s',
 {'volname': scvolume.get('name'),
 'destsc': destssn})
 screpl = self._get_json(r)
@@ -3069,7 +3065,7 @@ class StorageCenterApi(object):
 # Check we did something.
 if not screpl:
 # Failed to launch. Inform user. Throw.
-LOG.error(_LE('Unable to replicate %(volname)s to %(destsc)s'),
+LOG.error('Unable to replicate %(volname)s to %(destsc)s',
 {'volname': scvolume.get('name'),
 'destsc': destssn})
 return screpl
@@ -3206,8 +3202,8 @@ class StorageCenterApi(object):
 True)
 # 201 expected.
 if self._check_result(r):
-LOG.info(_LI('Replication created for '
+LOG.info('Replication created for '
-'%(src)s to %(dest)s'),
+'%(src)s to %(dest)s',
 {'src': svolume.get('name'),
 'dest': dvolume.get('name')})
 screpl = self._get_json(r)
@@ -3267,8 +3263,8 @@ class StorageCenterApi(object):
 if (self.rename_volume(svolume, self._repl_name(name)) and
 self.rename_volume(dvolume, name)):
 return True
-LOG.warning(_LW('flip_replication: Unable to replicate '
+LOG.warning('flip_replication: Unable to replicate '
-'%(name)s from %(src)s to %(dst)s'),
+'%(name)s from %(src)s to %(dst)s',
 {'name': name,
 'src': dvolume['scSerialNumber'],
 'dst': svolume['scSerialNumber']})
@@ -3290,8 +3286,8 @@ class StorageCenterApi(object):
 progress['amountRemaining'].split(' ', 1)[0])
 return progress['synced'], remaining
 except Exception:
-LOG.warning(_LW('replication_progress: Invalid replication'
+LOG.warning('replication_progress: Invalid replication'
-' progress information returned: %s'),
+' progress information returned: %s',
 progress)
 return None, None

@@ -3416,14 +3412,14 @@ class StorageCenterApi(object):
 pscqos = self._find_qos(primaryqos)
 sscqos = self._find_qos(secondaryqos, destssn)
 if not destssn:
-LOG.error(_LE('create_live_volume: Unable to find remote %s'),
+LOG.error('create_live_volume: Unable to find remote %s',
 remotessn)
 elif not pscqos:
-LOG.error(_LE('create_live_volume: Unable to find or create '
+LOG.error('create_live_volume: Unable to find or create '
-'qos node %s'), primaryqos)
+'qos node %s', primaryqos)
 elif not sscqos:
-LOG.error(_LE('create_live_volume: Unable to find or create remote'
+LOG.error('create_live_volume: Unable to find or create remote'
-' qos node %(qos)s on %(ssn)s'),
+' qos node %(qos)s on %(ssn)s',
 {'qos': secondaryqos, 'ssn': destssn})
 else:
 payload = {}
@@ -3451,12 +3447,12 @@ class StorageCenterApi(object):

 r = self.client.post('StorageCenter/ScLiveVolume', payload, True)
 if self._check_result(r):
-LOG.info(_LI('create_live_volume: Live Volume created from'
+LOG.info('create_live_volume: Live Volume created from'
-'%(svol)s to %(ssn)s'),
+'%(svol)s to %(ssn)s',
 {'svol': self._get_id(scvolume), 'ssn': remotessn})
 return self._get_json(r)
-LOG.error(_LE('create_live_volume: Failed to create Live Volume from'
+LOG.error('create_live_volume: Failed to create Live Volume from'
-'%(svol)s to %(ssn)s'),
+'%(svol)s to %(ssn)s',
 {'svol': self._get_id(scvolume), 'ssn': remotessn})
 return None

@@ -20,7 +20,7 @@ from oslo_utils import excutils
 import six

 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder.objects import fields
 from cinder.volume import driver
 from cinder.volume.drivers.dell import dell_storagecenter_api
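In this second file only the log-marker names are dropped from the import; the plain _ translator stays, because user-facing exception text (the msg = _('...') lines in the hunks that follow) is still translated even though log messages are not. A hedged sketch of the resulting split, with the helper and its calls invented for illustration rather than taken from the driver:

    from cinder import exception
    from cinder.i18n import _
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def delete_or_raise(api, volume_name):
        # Log text is plain and untranslated; the exception payload shown to the
        # user keeps the _() wrapper so it can still be localized.
        if not api.delete_volume(volume_name):
            LOG.error('Failed to delete volume %s', volume_name)
            raise exception.VolumeBackendAPIException(
                data=_('Unable to delete volume %s') % volume_name)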
@@ -88,7 +88,7 @@ class DellCommonDriver(driver.ManageableVD,
 self.is_direct_connect = False
 self.active_backend_id = kwargs.get('active_backend_id', None)
 self.failed_over = True if self.active_backend_id else False
-LOG.info(_LI('Loading %(name)s: Failover state is %(state)r'),
+LOG.info('Loading %(name)s: Failover state is %(state)r',
 {'name': self.backend_name,
 'state': self.failed_over})
 self.storage_protocol = 'iSCSI'
@@ -279,7 +279,7 @@ class DellCommonDriver(driver.ManageableVD,
 try:
 api.delete_volume(volumename)
 except exception.VolumeBackendAPIException as ex:
-LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg)
+LOG.info('Non fatal cleanup error: %s.', ex.msg)

 def create_volume(self, volume):
 """Create a volume."""
@@ -324,7 +324,7 @@ class DellCommonDriver(driver.ManageableVD,
 # clean up the volume now.
 self._cleanup_failed_create_volume(api, volume_name)
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Failed to create volume %s'),
+LOG.error('Failed to create volume %s',
 volume_name)
 if scvolume is None:
 raise exception.VolumeBackendAPIException(
@@ -374,16 +374,15 @@ class DellCommonDriver(driver.ManageableVD,
 if (sclivevolume and
 sclivevolume.get('secondaryScSerialNumber') == ssn and
 api.delete_live_volume(sclivevolume, True)):
-LOG.info(_LI('%(vname)s\'s replication live volume has '
+LOG.info('%(vname)s\'s replication live volume has '
-'been deleted from storage Center %(sc)s,'),
+'been deleted from storage Center %(sc)s,',
 {'vname': volume.get('id'),
 'sc': ssn})
 return True
 # If we are here either we do not have a live volume, we do not have
 # one on our configured SC or we were not able to delete it.
 # Either way, warn and leave.
-LOG.warning(_LW('Unable to delete %s live volume.'),
-volume.get('id'))
+LOG.warning('Unable to delete %s live volume.', volume.get('id'))
 return False

 def _delete_replications(self, api, volume):
@@ -409,8 +408,8 @@ class DellCommonDriver(driver.ManageableVD,
 ssn = int(ssnstring)
 # Are we a replication or a live volume?
 if not api.delete_replication(scvol, ssn):
-LOG.warning(_LW('Unable to delete replication of Volume '
+LOG.warning('Unable to delete replication of Volume '
-'%(vname)s to Storage Center %(sc)s.'),
+'%(vname)s to Storage Center %(sc)s.',
 {'vname': volume_name,
 'sc': ssnstring})
 # If none of that worked or there was nothing to do doesn't matter.
@@ -439,7 +438,7 @@ class DellCommonDriver(driver.ManageableVD,
 deleted = api.delete_volume(volume_name, provider_id)
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Failed to delete volume %s'),
+LOG.error('Failed to delete volume %s',
 volume_name)

 # if there was an error we will have raised an
@@ -466,8 +465,7 @@ class DellCommonDriver(driver.ManageableVD,
 return {'status': fields.SnapshotStatus.AVAILABLE,
 'provider_id': scvolume['instanceId']}
 else:
-LOG.warning(_LW('Unable to locate volume:%s'),
-volume_name)
+LOG.warning('Unable to locate volume:%s', volume_name)

 snapshot['status'] = fields.SnapshotStatus.ERROR
 msg = _('Failed to create snapshot %s') % snapshot_id
@@ -540,8 +538,7 @@ class DellCommonDriver(driver.ManageableVD,
 # Clean up after ourselves.
 self._cleanup_failed_create_volume(api, volume_name)
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Failed to create volume %s'),
-volume_name)
+LOG.error('Failed to create volume %s', volume_name)
 if scvolume is not None:
 LOG.debug('Volume %(vol)s created from %(snap)s',
 {'vol': volume_name,
@@ -604,8 +601,7 @@ class DellCommonDriver(driver.ManageableVD,
 # Clean up after ourselves.
 self._cleanup_failed_create_volume(api, volume_name)
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Failed to create volume %s'),
-volume_name)
+LOG.error('Failed to create volume %s', volume_name)
 if scvolume is not None:
 LOG.debug('Volume %(vol)s cloned from %(src)s',
 {'vol': volume_name,
@@ -656,7 +652,7 @@ class DellCommonDriver(driver.ManageableVD,
 self._is_live_vol(volume))
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.error(_LE('Failed to ensure export of volume %s'),
+LOG.error('Failed to ensure export of volume %s',
 volume_name)
 if scvolume is None:
 msg = _('Unable to find volume %s') % volume_name
@@ -738,7 +734,7 @@ class DellCommonDriver(driver.ManageableVD,
 data['free_capacity_gb'] = freespacegb
 else:
 # Soldier on. Just return 0 for this iteration.
-LOG.error(_LE('Unable to retrieve volume stats.'))
+LOG.error('Unable to retrieve volume stats.')
 data['total_capacity_gb'] = 0
 data['free_capacity_gb'] = 0

@@ -782,7 +778,7 @@ class DellCommonDriver(driver.ManageableVD,

 return model_update
 # The world was horrible to us so we should error and leave.
-LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
+LOG.error('Unable to rename the logical volume for volume: %s',
 original_volume_name)

 return {'_name_id': new_volume['_name_id'] or new_volume['id']}
@@ -799,7 +795,7 @@ class DellCommonDriver(driver.ManageableVD,
 with self._client.open_connection() as api:
 cgroup = api.create_replay_profile(gid)
 if cgroup:
-LOG.info(_LI('Created Consistency Group %s'), gid)
+LOG.info('Created Consistency Group %s', gid)
 return
 msg = _('Unable to create consistency group %s') % gid
 raise exception.VolumeBackendAPIException(data=msg)
@@ -860,11 +856,11 @@ class DellCommonDriver(driver.ManageableVD,
 with self._client.open_connection() as api:
 profile = api.find_replay_profile(gid)
 if not profile:
-LOG.error(_LE('Cannot find Consistency Group %s'), gid)
+LOG.error('Cannot find Consistency Group %s', gid)
 elif api.update_cg_volumes(profile,
 add_volumes,
 remove_volumes):
-LOG.info(_LI('Updated Consistency Group %s'), gid)
+LOG.info('Updated Consistency Group %s', gid)
 # we need nothing updated above us so just return None.
 return None, None, None
 # Things did not go well so throw.
@@ -900,9 +896,9 @@ class DellCommonDriver(driver.ManageableVD,
 return model_update, snapshot_updates

 # That didn't go well. Tell them why. Then bomb out.
-LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
+LOG.error('Failed to snap Consistency Group %s', cgid)
 else:
-LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
+LOG.error('Cannot find Consistency Group %s', cgid)

 msg = _('Unable to snap Consistency Group %s') % cgid
 raise exception.VolumeBackendAPIException(data=msg)
@@ -924,7 +920,7 @@ class DellCommonDriver(driver.ManageableVD,
 with self._client.open_connection() as api:
 profile = api.find_replay_profile(cgid)
 if profile:
-LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
+LOG.info('Deleting snapshot %(ss)s from %(pro)s',
 {'ss': snapshotid,
 'pro': profile})
 if not api.delete_cg_replay(profile, snapshotid):
@@ -1058,7 +1054,7 @@ class DellCommonDriver(driver.ManageableVD,
 'spec': requested})
 return current, requested
 else:
-LOG.info(_LI('Retype was to same Storage Profile.'))
+LOG.info('Retype was to same Storage Profile.')
 return None, None

 def _retype_replication(self, api, volume, scvolume, new_type, diff):
@@ -1104,8 +1100,8 @@ class DellCommonDriver(driver.ManageableVD,
 dictionary of its reported capabilities (Not Used).
 :returns: Boolean or Boolean, model_update tuple.
 """
-LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s '
+LOG.info('retype: volume_name: %(name)s new_type: %(newtype)s '
-'diff: %(diff)s host: %(host)s'),
+'diff: %(diff)s host: %(host)s',
 {'name': volume.get('id'), 'newtype': new_type,
 'diff': diff, 'host': host})
 model_update = None
@@ -1118,7 +1114,7 @@ class DellCommonDriver(driver.ManageableVD,
 # Get our volume
 scvolume = api.find_volume(volume_name, provider_id)
 if scvolume is None:
-LOG.error(_LE('Retype unable to find volume %s.'),
+LOG.error('Retype unable to find volume %s.',
 volume_name)
 return False
 # Check our specs.
@@ -1130,7 +1126,7 @@ class DellCommonDriver(driver.ManageableVD,
 # if there is a change and it didn't work fast fail.
 if (current != requested and not
 api.update_storage_profile(scvolume, requested)):
-LOG.error(_LE('Failed to update storage profile'))
+LOG.error('Failed to update storage profile')
 return False

 # Replay profiles.
@@ -1141,7 +1137,7 @@ class DellCommonDriver(driver.ManageableVD,
 # if there is a change and it didn't work fast fail.
 if requested and not api.update_replay_profiles(scvolume,
 requested):
-LOG.error(_LE('Failed to update replay profiles'))
+LOG.error('Failed to update replay profiles')
 return False

 # Volume QOS profiles.
@@ -1151,8 +1147,7 @@ class DellCommonDriver(driver.ManageableVD,
 'storagetype:volumeqos'))
 if current != requested:
 if not api.update_qos_profile(scvolume, requested):
-LOG.error(_LE('Failed to update volume '
-'qos profile'))
+LOG.error('Failed to update volume qos profile')

 # Group QOS profiles.
 current, requested = (
@@ -1162,8 +1157,7 @@ class DellCommonDriver(driver.ManageableVD,
 if current != requested:
 if not api.update_qos_profile(scvolume, requested,
 True):
-LOG.error(_LE('Failed to update group '
-'qos profile'))
+LOG.error('Failed to update group qos profile')
 return False

 # Data reduction profiles.
@@ -1174,8 +1168,8 @@ class DellCommonDriver(driver.ManageableVD,
 if current != requested:
 if not api.update_datareduction_profile(scvolume,
 requested):
-LOG.error(_LE('Failed to update data reduction '
+LOG.error('Failed to update data reduction '
-'profile'))
+'profile')
 return False

 # Active Replay
@@ -1186,8 +1180,8 @@ class DellCommonDriver(driver.ManageableVD,
 if current != requested and not (
 api.update_replicate_active_replay(
 scvolume, requested == '<is> True')):
-LOG.error(_LE('Failed to apply '
+LOG.error('Failed to apply '
-'replication:activereplay setting'))
+'replication:activereplay setting')
 return False

 # Deal with replication.
@@ -1231,8 +1225,8 @@ class DellCommonDriver(driver.ManageableVD,
 destssn = ssn
 break
 except exception.VolumeBackendAPIException:
-LOG.warning(_LW('SSN %s appears to be down.'), ssn)
+LOG.warning('SSN %s appears to be down.', ssn)
-LOG.info(_LI('replication failover secondary is %(ssn)s'),
+LOG.info('replication failover secondary is %(ssn)s',
 {'ssn': destssn})
 return destssn

@@ -1309,8 +1303,8 @@ class DellCommonDriver(driver.ManageableVD,
 ovol, 'org:' + ovol['name']):
 # Not a reason to fail but will possibly
 # cause confusion so warn.
-LOG.warning(_LW('Unable to locate and rename '
+LOG.warning('Unable to locate and rename '
-'original volume: %s'),
+'original volume: %s',
 item['ovol'])
 item['status'] = 'synced'
 else:
@@ -1329,9 +1323,9 @@ class DellCommonDriver(driver.ManageableVD,
 if lastremain == currentremain:
 # One chance down. Warn user.
 deadcount -= 1
-LOG.warning(_LW('Waiting for replications to complete. '
+LOG.warning('Waiting for replications to complete. '
 'No progress for %(timeout)d seconds. '
-'deadcount = %(cnt)d'),
+'deadcount = %(cnt)d',
 {'timeout': self.failback_timeout,
 'cnt': deadcount})
 else:
@@ -1341,13 +1335,13 @@ class DellCommonDriver(driver.ManageableVD,

 # If we've used up our 5 chances we error and log..
 if deadcount == 0:
-LOG.error(_LE('Replication progress has stopped: '
-'%f remaining.'), currentremain)
+LOG.error('Replication progress has stopped: %f remaining.',
+currentremain)
 for item in items:
 if item['status'] == 'inprogress':
-LOG.error(_LE('Failback failed for volume: %s. '
+LOG.error('Failback failed for volume: %s. '
 'Timeout waiting for replication to '
-'sync with original volume.'),
+'sync with original volume.',
 item['volume']['id'])
 item['status'] = 'error'
 break
@ -1426,7 +1420,7 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
:param qosnode: Dell QOS node object.
|
:param qosnode: Dell QOS node object.
|
||||||
:return: replitem dict.
|
:return: replitem dict.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI('failback_volumes: replicated volume'))
|
LOG.info('failback_volumes: replicated volume')
|
||||||
# Get our current volume.
|
# Get our current volume.
|
||||||
cvol = api.find_volume(volume['id'], volume['provider_id'])
|
cvol = api.find_volume(volume['id'], volume['provider_id'])
|
||||||
# Original volume on the primary.
|
# Original volume on the primary.
|
||||||
@ -1446,7 +1440,7 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
nvolid = screpl['destinationVolume']['instanceId']
|
nvolid = screpl['destinationVolume']['instanceId']
|
||||||
status = 'inprogress'
|
status = 'inprogress'
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE('Unable to restore %s'), volume['id'])
|
LOG.error('Unable to restore %s', volume['id'])
|
||||||
screplid = None
|
screplid = None
|
||||||
nvolid = None
|
nvolid = None
|
||||||
status = 'error'
|
status = 'error'
|
||||||
@ -1481,14 +1475,14 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
sclivevolume = api.get_live_volume(provider_id)
|
sclivevolume = api.get_live_volume(provider_id)
|
||||||
# TODO(tswanson): Check swapped state first.
|
# TODO(tswanson): Check swapped state first.
|
||||||
if sclivevolume and api.swap_roles_live_volume(sclivevolume):
|
if sclivevolume and api.swap_roles_live_volume(sclivevolume):
|
||||||
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
|
LOG.info('Success swapping sclivevolume roles %s', id)
|
||||||
model_update = {
|
model_update = {
|
||||||
'status': 'available',
|
'status': 'available',
|
||||||
'replication_status': fields.ReplicationStatus.ENABLED,
|
'replication_status': fields.ReplicationStatus.ENABLED,
|
||||||
'provider_id':
|
'provider_id':
|
||||||
sclivevolume['secondaryVolume']['instanceId']}
|
sclivevolume['secondaryVolume']['instanceId']}
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI('Failure swapping roles %s'), id)
|
LOG.info('Failure swapping roles %s', id)
|
||||||
model_update = {'status': 'error'}
|
model_update = {'status': 'error'}
|
||||||
|
|
||||||
return model_update
|
return model_update
|
||||||
@ -1509,7 +1503,7 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
:param volumes: List of volumes that need to be failed back.
|
:param volumes: List of volumes that need to be failed back.
|
||||||
:return: volume_updates for the list of volumes.
|
:return: volume_updates for the list of volumes.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI('failback_volumes'))
|
LOG.info('failback_volumes')
|
||||||
with self._client.open_connection() as api:
|
with self._client.open_connection() as api:
|
||||||
# Get our qosnode. This is a good way to make sure the backend
|
# Get our qosnode. This is a good way to make sure the backend
|
||||||
# is still setup so that we can do this.
|
# is still setup so that we can do this.
|
||||||
@ -1524,7 +1518,7 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
# Trundle through the volumes. Update non replicated to alive again
|
# Trundle through the volumes. Update non replicated to alive again
|
||||||
# and reverse the replications for the remaining volumes.
|
# and reverse the replications for the remaining volumes.
|
||||||
for volume in volumes:
|
for volume in volumes:
|
||||||
LOG.info(_LI('failback_volumes: starting volume: %s'), volume)
|
LOG.info('failback_volumes: starting volume: %s', volume)
|
||||||
model_update = {}
|
model_update = {}
|
||||||
if volume.get('replication_driver_data'):
|
if volume.get('replication_driver_data'):
|
||||||
rspecs = self._get_replication_specs(
|
rspecs = self._get_replication_specs(
|
||||||
@ -1567,12 +1561,12 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
rvol = api.break_replication(id, provider_id, destssn)
|
rvol = api.break_replication(id, provider_id, destssn)
|
||||||
model_update = {}
|
model_update = {}
|
||||||
if rvol:
|
if rvol:
|
||||||
LOG.info(_LI('Success failing over volume %s'), id)
|
LOG.info('Success failing over volume %s', id)
|
||||||
model_update = {'replication_status':
|
model_update = {'replication_status':
|
||||||
fields.ReplicationStatus.FAILED_OVER,
|
fields.ReplicationStatus.FAILED_OVER,
|
||||||
'provider_id': rvol['instanceId']}
|
'provider_id': rvol['instanceId']}
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI('Failed failing over volume %s'), id)
|
LOG.info('Failed failing over volume %s', id)
|
||||||
model_update = {'status': 'error'}
|
model_update = {'status': 'error'}
|
||||||
|
|
||||||
return model_update
|
return model_update
|
||||||
@ -1585,11 +1579,11 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
swapped = api.is_swapped(provider_id, sclivevolume)
|
swapped = api.is_swapped(provider_id, sclivevolume)
|
||||||
# If we aren't swapped try it. If fail error out.
|
# If we aren't swapped try it. If fail error out.
|
||||||
if not swapped and not api.swap_roles_live_volume(sclivevolume):
|
if not swapped and not api.swap_roles_live_volume(sclivevolume):
|
||||||
LOG.info(_LI('Failure swapping roles %s'), id)
|
LOG.info('Failure swapping roles %s', id)
|
||||||
model_update = {'status': 'error'}
|
model_update = {'status': 'error'}
|
||||||
return model_update
|
return model_update
|
||||||
|
|
||||||
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
|
LOG.info('Success swapping sclivevolume roles %s', id)
|
||||||
sclivevolume = api.get_live_volume(provider_id)
|
sclivevolume = api.get_live_volume(provider_id)
|
||||||
model_update = {
|
model_update = {
|
||||||
'replication_status':
|
'replication_status':
|
||||||
@ -1628,7 +1622,7 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
raise exception.InvalidReplicationTarget(
|
raise exception.InvalidReplicationTarget(
|
||||||
reason=_('Already failed over'))
|
reason=_('Already failed over'))
|
||||||
|
|
||||||
LOG.info(_LI('Failing backend to %s'), secondary_id)
|
LOG.info('Failing backend to %s', secondary_id)
|
||||||
# basic check
|
# basic check
|
||||||
if self.replication_enabled:
|
if self.replication_enabled:
|
||||||
with self._client.open_connection() as api:
|
with self._client.open_connection() as api:
|
||||||
@ -1747,9 +1741,9 @@ class DellCommonDriver(driver.ManageableVD,
|
|||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
# Life is good. Let the world know what we've done.
|
# Life is good. Let the world know what we've done.
|
||||||
LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on '
|
LOG.info('manage_existing_snapshot: snapshot %(exist)s on '
|
||||||
'volume %(volume)s has been renamed to %(id)s and is '
|
'volume %(volume)s has been renamed to %(id)s and is '
|
||||||
'now managed by Cinder.'),
|
'now managed by Cinder.',
|
||||||
{'exist': screplay.get('description'),
|
{'exist': screplay.get('description'),
|
||||||
'volume': volume_name,
|
'volume': volume_name,
|
||||||
'id': snapshot_id})
|
'id': snapshot_id})
|
||||||
|
@ -18,7 +18,7 @@ from oslo_log import log as logging
|
|||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LW
|
from cinder.i18n import _
|
||||||
from cinder import interface
|
from cinder import interface
|
||||||
from cinder.volume import driver
|
from cinder.volume import driver
|
||||||
from cinder.volume.drivers.dell import dell_storagecenter_common
|
from cinder.volume.drivers.dell import dell_storagecenter_common
|
||||||
@ -147,11 +147,11 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
'discard': True}}
|
'discard': True}}
|
||||||
LOG.debug('Return FC data: %s', data)
|
LOG.debug('Return FC data: %s', data)
|
||||||
return data
|
return data
|
||||||
LOG.error(_LE('Lun mapping returned null!'))
|
LOG.error('Lun mapping returned null!')
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to initialize connection.'))
|
LOG.error('Failed to initialize connection.')
|
||||||
|
|
||||||
# We get here because our mapping is none so blow up.
|
# We get here because our mapping is none so blow up.
|
||||||
raise exception.VolumeBackendAPIException(_('Unable to map volume.'))
|
raise exception.VolumeBackendAPIException(_('Unable to map volume.'))
|
||||||
@ -187,8 +187,8 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
sclivevolume['secondaryVolume']['instanceId'])
|
sclivevolume['secondaryVolume']['instanceId'])
|
||||||
if secondaryvol:
|
if secondaryvol:
|
||||||
return api.find_wwns(secondaryvol, secondary)
|
return api.find_wwns(secondaryvol, secondary)
|
||||||
LOG.warning(_LW('Unable to map live volume secondary volume'
|
LOG.warning('Unable to map live volume secondary volume'
|
||||||
' %(vol)s to secondary server wwns: %(wwns)r'),
|
' %(vol)s to secondary server wwns: %(wwns)r',
|
||||||
{'vol': sclivevolume['secondaryVolume']['instanceName'],
|
{'vol': sclivevolume['secondaryVolume']['instanceName'],
|
||||||
'wwns': wwns})
|
'wwns': wwns})
|
||||||
return None, [], {}
|
return None, [], {}
|
||||||
@ -253,7 +253,7 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to terminate connection'))
|
LOG.error('Failed to terminate connection')
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
_('Terminate connection unable to connect to backend.'))
|
_('Terminate connection unable to connect to backend.'))
|
||||||
|
|
||||||
|
@ -18,7 +18,7 @@ from oslo_log import log as logging
|
|||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI, _LW
|
from cinder.i18n import _
|
||||||
from cinder import interface
|
from cinder import interface
|
||||||
from cinder.volume import driver
|
from cinder.volume import driver
|
||||||
from cinder.volume.drivers.dell import dell_storagecenter_common
|
from cinder.volume.drivers.dell import dell_storagecenter_common
|
||||||
@ -92,8 +92,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
islivevol = self._is_live_vol(volume)
|
islivevol = self._is_live_vol(volume)
|
||||||
initiator_name = connector.get('initiator')
|
initiator_name = connector.get('initiator')
|
||||||
multipath = connector.get('multipath', False)
|
multipath = connector.get('multipath', False)
|
||||||
LOG.info(_LI('initialize_ connection: %(vol)s:%(pid)s:'
|
LOG.info('initialize_ connection: %(vol)s:%(pid)s:'
|
||||||
'%(intr)s. Multipath is %(mp)r'),
|
'%(intr)s. Multipath is %(mp)r',
|
||||||
{'vol': volume_name,
|
{'vol': volume_name,
|
||||||
'pid': provider_id,
|
'pid': provider_id,
|
||||||
'intr': initiator_name,
|
'intr': initiator_name,
|
||||||
@ -166,7 +166,7 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
# Re-raise any backend exception.
|
# Re-raise any backend exception.
|
||||||
except exception.VolumeBackendAPIException:
|
except exception.VolumeBackendAPIException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to initialize connection'))
|
LOG.error('Failed to initialize connection')
|
||||||
# If there is a data structure issue then detail the exception
|
# If there is a data structure issue then detail the exception
|
||||||
# and bail with a Backend Exception.
|
# and bail with a Backend Exception.
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
@ -211,8 +211,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
'target_lun': None,
|
'target_lun': None,
|
||||||
'target_luns': [],
|
'target_luns': [],
|
||||||
}
|
}
|
||||||
LOG.warning(_LW('Unable to map live volume secondary volume'
|
LOG.warning('Unable to map live volume secondary volume'
|
||||||
' %(vol)s to secondary server intiator: %(init)r'),
|
' %(vol)s to secondary server intiator: %(init)r',
|
||||||
{'vol': sclivevolume['secondaryVolume']['instanceName'],
|
{'vol': sclivevolume['secondaryVolume']['instanceName'],
|
||||||
'init': initiatorname})
|
'init': initiatorname})
|
||||||
return data
|
return data
|
||||||
@ -255,8 +255,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
|
|||||||
return
|
return
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to terminate connection '
|
LOG.error('Failed to terminate connection '
|
||||||
'%(initiator)s %(vol)s'),
|
'%(initiator)s %(vol)s',
|
||||||
{'initiator': initiator_name,
|
{'initiator': initiator_name,
|
||||||
'vol': volume_name})
|
'vol': volume_name})
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
|
@ -29,7 +29,7 @@ from oslo_utils import excutils
|
|||||||
from six.moves import range
|
from six.moves import range
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LW, _LI
|
from cinder.i18n import _
|
||||||
from cinder import interface
|
from cinder import interface
|
||||||
from cinder import ssh_utils
|
from cinder import ssh_utils
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
@ -199,7 +199,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
|
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
|
||||||
desc = _("Error executing PS command")
|
desc = _("Error executing PS command")
|
||||||
cmdout = '\n'.join(out)
|
cmdout = '\n'.join(out)
|
||||||
LOG.error(_LE("%s"), cmdout)
|
LOG.error(cmdout)
|
||||||
raise processutils.ProcessExecutionError(
|
raise processutils.ProcessExecutionError(
|
||||||
stdout=cmdout, cmd=command, description=desc)
|
stdout=cmdout, cmd=command, description=desc)
|
||||||
return out
|
return out
|
||||||
@ -232,12 +232,12 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
while attempts > 0:
|
while attempts > 0:
|
||||||
attempts -= 1
|
attempts -= 1
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI('PS-driver: executing "%s".'), command)
|
LOG.info('PS-driver: executing "%s".', command)
|
||||||
return self._ssh_execute(
|
return self._ssh_execute(
|
||||||
ssh, command,
|
ssh, command,
|
||||||
timeout=self.configuration.ssh_conn_timeout)
|
timeout=self.configuration.ssh_conn_timeout)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Error running command.'))
|
LOG.exception('Error running command.')
|
||||||
greenthread.sleep(random.randint(20, 500) / 100.0)
|
greenthread.sleep(random.randint(20, 500) / 100.0)
|
||||||
msg = (_("SSH Command failed after '%(total_attempts)r' "
|
msg = (_("SSH Command failed after '%(total_attempts)r' "
|
||||||
"attempts : '%(command)s'") %
|
"attempts : '%(command)s'") %
|
||||||
@ -247,7 +247,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Error running SSH command: "%s".'), command)
|
LOG.error('Error running SSH command: "%s".', command)
|
||||||
|
|
||||||
def check_for_setup_error(self):
|
def check_for_setup_error(self):
|
||||||
super(PSSeriesISCSIDriver, self).check_for_setup_error()
|
super(PSSeriesISCSIDriver, self).check_for_setup_error()
|
||||||
@ -398,11 +398,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
out_tup = line.rstrip().partition(' ')
|
out_tup = line.rstrip().partition(' ')
|
||||||
self._group_ip = out_tup[-1]
|
self._group_ip = out_tup[-1]
|
||||||
|
|
||||||
LOG.info(_LI('PS-driver: Setup is complete, group IP is "%s".'),
|
LOG.info('PS-driver: Setup is complete, group IP is "%s".',
|
||||||
self._group_ip)
|
self._group_ip)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to setup the Dell EMC PS driver.'))
|
LOG.error('Failed to setup the Dell EMC PS driver.')
|
||||||
|
|
||||||
def create_volume(self, volume):
|
def create_volume(self, volume):
|
||||||
"""Create a volume."""
|
"""Create a volume."""
|
||||||
@ -419,7 +419,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
return self._get_volume_data(out)
|
return self._get_volume_data(out)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to create volume "%s".'), volume['name'])
|
LOG.error('Failed to create volume "%s".', volume['name'])
|
||||||
|
|
||||||
def add_multihost_access(self, volume):
|
def add_multihost_access(self, volume):
|
||||||
"""Add multihost-access to a volume. Needed for live migration."""
|
"""Add multihost-access to a volume. Needed for live migration."""
|
||||||
@ -429,8 +429,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
self._eql_execute(*cmd)
|
self._eql_execute(*cmd)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to add multihost-access '
|
LOG.error('Failed to add multihost-access '
|
||||||
'for volume "%s".'),
|
'for volume "%s".',
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def _set_volume_description(self, volume, description):
|
def _set_volume_description(self, volume, description):
|
||||||
@ -441,8 +441,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
self._eql_execute(*cmd)
|
self._eql_execute(*cmd)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to set description '
|
LOG.error('Failed to set description '
|
||||||
'for volume "%s".'),
|
'for volume "%s".',
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
@ -452,12 +452,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
self._eql_execute('volume', 'select', volume['name'], 'offline')
|
self._eql_execute('volume', 'select', volume['name'], 'offline')
|
||||||
self._eql_execute('volume', 'delete', volume['name'])
|
self._eql_execute('volume', 'delete', volume['name'])
|
||||||
except exception.VolumeNotFound:
|
except exception.VolumeNotFound:
|
||||||
LOG.warning(_LW('Volume %s was not found while trying to delete '
|
LOG.warning('Volume %s was not found while trying to delete it.',
|
||||||
'it.'), volume['name'])
|
volume['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to delete '
|
LOG.error('Failed to delete volume "%s".', volume['name'])
|
||||||
'volume "%s".'), volume['name'])
|
|
||||||
|
|
||||||
def create_snapshot(self, snapshot):
|
def create_snapshot(self, snapshot):
|
||||||
"""Create snapshot of existing volume on appliance."""
|
"""Create snapshot of existing volume on appliance."""
|
||||||
@ -472,7 +471,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
snapshot['name'])
|
snapshot['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to create snapshot of volume "%s".'),
|
LOG.error('Failed to create snapshot of volume "%s".',
|
||||||
snapshot['volume_name'])
|
snapshot['volume_name'])
|
||||||
|
|
||||||
def create_volume_from_snapshot(self, volume, snapshot):
|
def create_volume_from_snapshot(self, volume, snapshot):
|
||||||
@ -495,7 +494,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
return self._get_volume_data(out)
|
return self._get_volume_data(out)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to create volume from snapshot "%s".'),
|
LOG.error('Failed to create volume from snapshot "%s".',
|
||||||
snapshot['name'])
|
snapshot['name'])
|
||||||
|
|
||||||
def create_cloned_volume(self, volume, src_vref):
|
def create_cloned_volume(self, volume, src_vref):
|
||||||
@ -513,7 +512,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
return self._get_volume_data(out)
|
return self._get_volume_data(out)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to create clone of volume "%s".'),
|
LOG.error('Failed to create clone of volume "%s".',
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def delete_snapshot(self, snapshot):
|
def delete_snapshot(self, snapshot):
|
||||||
@ -526,8 +525,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
LOG.debug('Snapshot %s could not be found.', snapshot['name'])
|
LOG.debug('Snapshot %s could not be found.', snapshot['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to delete snapshot %(snap)s of '
|
LOG.error('Failed to delete snapshot %(snap)s of '
|
||||||
'volume %(vol)s.'),
|
'volume %(vol)s.',
|
||||||
{'snap': snapshot['name'],
|
{'snap': snapshot['name'],
|
||||||
'vol': snapshot['volume_name']})
|
'vol': snapshot['volume_name']})
|
||||||
|
|
||||||
@ -548,8 +547,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
}
|
}
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to initialize connection '
|
LOG.error('Failed to initialize connection to volume "%s".',
|
||||||
'to volume "%s".'),
|
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def terminate_connection(self, volume, connector, force=False, **kwargs):
|
def terminate_connection(self, volume, connector, force=False, **kwargs):
|
||||||
@ -563,8 +561,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
'access', 'delete', connection_id)
|
'access', 'delete', connection_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to terminate connection '
|
LOG.error('Failed to terminate connection to volume "%s".',
|
||||||
'to volume "%s".'),
|
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def create_export(self, context, volume, connector):
|
def create_export(self, context, volume, connector):
|
||||||
@ -585,11 +582,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
try:
|
try:
|
||||||
self._check_volume(volume)
|
self._check_volume(volume)
|
||||||
except exception.VolumeNotFound:
|
except exception.VolumeNotFound:
|
||||||
LOG.warning(_LW('Volume %s is not found!, it may have been '
|
LOG.warning('Volume %s is not found!, it may have been deleted.',
|
||||||
'deleted.'), volume['name'])
|
volume['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to ensure export of volume "%s".'),
|
LOG.error('Failed to ensure export of volume "%s".',
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def remove_export(self, context, volume):
|
def remove_export(self, context, volume):
|
||||||
@ -606,15 +603,15 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
try:
|
try:
|
||||||
self._eql_execute('volume', 'select', volume['name'],
|
self._eql_execute('volume', 'select', volume['name'],
|
||||||
'size', "%sG" % new_size)
|
'size', "%sG" % new_size)
|
||||||
LOG.info(_LI('Volume %(name)s resized from '
|
LOG.info('Volume %(name)s resized from '
|
||||||
'%(current_size)sGB to %(new_size)sGB.'),
|
'%(current_size)sGB to %(new_size)sGB.',
|
||||||
{'name': volume['name'],
|
{'name': volume['name'],
|
||||||
'current_size': volume['size'],
|
'current_size': volume['size'],
|
||||||
'new_size': new_size})
|
'new_size': new_size})
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to extend_volume %(name)s from '
|
LOG.error('Failed to extend_volume %(name)s from '
|
||||||
'%(current_size)sGB to %(new_size)sGB.'),
|
'%(current_size)sGB to %(new_size)sGB.',
|
||||||
{'name': volume['name'],
|
{'name': volume['name'],
|
||||||
'current_size': volume['size'],
|
'current_size': volume['size'],
|
||||||
'new_size': new_size})
|
'new_size': new_size})
|
||||||
@ -643,14 +640,14 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
self.add_multihost_access(volume)
|
self.add_multihost_access(volume)
|
||||||
data = self._get_volume_info(volume['name'])
|
data = self._get_volume_info(volume['name'])
|
||||||
updates = self._get_model_update(data['iSCSI_Name'])
|
updates = self._get_model_update(data['iSCSI_Name'])
|
||||||
LOG.info(_LI("Backend volume %(back_vol)s renamed to "
|
LOG.info("Backend volume %(back_vol)s renamed to "
|
||||||
"%(vol)s and is now managed by cinder."),
|
"%(vol)s and is now managed by cinder.",
|
||||||
{'back_vol': existing_volume_name,
|
{'back_vol': existing_volume_name,
|
||||||
'vol': volume['name']})
|
'vol': volume['name']})
|
||||||
return updates
|
return updates
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to manage volume "%s".'), volume['name'])
|
LOG.error('Failed to manage volume "%s".', volume['name'])
|
||||||
|
|
||||||
def manage_existing_get_size(self, volume, existing_ref):
|
def manage_existing_get_size(self, volume, existing_ref):
|
||||||
"""Return size of volume to be managed by manage_existing.
|
"""Return size of volume to be managed by manage_existing.
|
||||||
@ -674,13 +671,13 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
|||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
self._set_volume_description(volume, '"OpenStack UnManaged"')
|
self._set_volume_description(volume, '"OpenStack UnManaged"')
|
||||||
LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no "
|
LOG.info("Virtual volume %(disp)s '%(vol)s' is no "
|
||||||
"longer managed."),
|
"longer managed.",
|
||||||
{'disp': volume['display_name'],
|
{'disp': volume['display_name'],
|
||||||
'vol': volume['name']})
|
'vol': volume['name']})
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to unmanage volume "%s".'),
|
LOG.error('Failed to unmanage volume "%s".',
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def local_path(self, volume):
|
def local_path(self, volume):
|
||||||
|
@ -31,7 +31,7 @@ from six.moves import urllib
|
|||||||
|
|
||||||
from cinder import context
|
from cinder import context
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LI, _LW, _LE
|
from cinder.i18n import _
|
||||||
from cinder.image import image_utils
|
from cinder.image import image_utils
|
||||||
from cinder import interface
|
from cinder import interface
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
@ -135,10 +135,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
if self.verify_server_certificate:
|
if self.verify_server_certificate:
|
||||||
self.server_certificate_path = (
|
self.server_certificate_path = (
|
||||||
self.configuration.sio_server_certificate_path)
|
self.configuration.sio_server_certificate_path)
|
||||||
LOG.info(_LI(
|
LOG.info("REST server IP: %(ip)s, port: %(port)s, username: %("
|
||||||
"REST server IP: %(ip)s, port: %(port)s, username: %("
|
"user)s. Verify server's certificate: %(verify_cert)s.",
|
||||||
"user)s. "
|
|
||||||
"Verify server's certificate: %(verify_cert)s."),
|
|
||||||
{'ip': self.server_ip,
|
{'ip': self.server_ip,
|
||||||
'port': self.server_port,
|
'port': self.server_port,
|
||||||
'user': self.server_username,
|
'user': self.server_username,
|
||||||
@ -153,29 +151,25 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
self.storage_pool_name = self.configuration.sio_storage_pool_name
|
self.storage_pool_name = self.configuration.sio_storage_pool_name
|
||||||
self.storage_pool_id = self.configuration.sio_storage_pool_id
|
self.storage_pool_id = self.configuration.sio_storage_pool_id
|
||||||
if self.storage_pool_name is None and self.storage_pool_id is None:
|
if self.storage_pool_name is None and self.storage_pool_id is None:
|
||||||
LOG.warning(_LW("No storage pool name or id was found."))
|
LOG.warning("No storage pool name or id was found.")
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI(
|
LOG.info("Storage pools names: %(pools)s, "
|
||||||
"Storage pools names: %(pools)s, "
|
"storage pool name: %(pool)s, pool id: %(pool_id)s.",
|
||||||
"storage pool name: %(pool)s, pool id: %(pool_id)s."),
|
|
||||||
{'pools': self.storage_pools,
|
{'pools': self.storage_pools,
|
||||||
'pool': self.storage_pool_name,
|
'pool': self.storage_pool_name,
|
||||||
'pool_id': self.storage_pool_id})
|
'pool_id': self.storage_pool_id})
|
||||||
|
|
||||||
self.protection_domain_name = (
|
self.protection_domain_name = (
|
||||||
self.configuration.sio_protection_domain_name)
|
self.configuration.sio_protection_domain_name)
|
||||||
LOG.info(_LI(
|
LOG.info("Protection domain name: %(domain_name)s.",
|
||||||
"Protection domain name: %(domain_name)s."),
|
|
||||||
{'domain_name': self.protection_domain_name})
|
{'domain_name': self.protection_domain_name})
|
||||||
self.protection_domain_id = self.configuration.sio_protection_domain_id
|
self.protection_domain_id = self.configuration.sio_protection_domain_id
|
||||||
LOG.info(_LI(
|
LOG.info("Protection domain id: %(domain_id)s.",
|
||||||
"Protection domain id: %(domain_id)s."),
|
|
||||||
{'domain_id': self.protection_domain_id})
|
{'domain_id': self.protection_domain_id})
|
||||||
|
|
||||||
self.provisioning_type = (
|
self.provisioning_type = (
|
||||||
'thin' if self.configuration.san_thin_provision else 'thick')
|
'thin' if self.configuration.san_thin_provision else 'thick')
|
||||||
LOG.info(_LI(
|
LOG.info("Default provisioning type: %(provisioning_type)s.",
|
||||||
"Default provisioning type: %(provisioning_type)s."),
|
|
||||||
{'provisioning_type': self.provisioning_type})
|
{'provisioning_type': self.provisioning_type})
|
||||||
self.configuration.max_over_subscription_ratio = (
|
self.configuration.max_over_subscription_ratio = (
|
||||||
self.configuration.sio_max_over_subscription_ratio)
|
self.configuration.sio_max_over_subscription_ratio)
|
||||||
@ -199,8 +193,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
def check_for_setup_error(self):
|
def check_for_setup_error(self):
|
||||||
if (not self.protection_domain_name and
|
if (not self.protection_domain_name and
|
||||||
not self.protection_domain_id):
|
not self.protection_domain_id):
|
||||||
LOG.warning(_LW("No protection domain name or id "
|
LOG.warning("No protection domain name or id "
|
||||||
"was specified in configuration."))
|
"was specified in configuration.")
|
||||||
|
|
||||||
if self.protection_domain_name and self.protection_domain_id:
|
if self.protection_domain_name and self.protection_domain_id:
|
||||||
msg = _("Cannot specify both protection domain name "
|
msg = _("Cannot specify both protection domain name "
|
||||||
@ -220,8 +214,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
raise exception.InvalidInput(reason=msg)
|
raise exception.InvalidInput(reason=msg)
|
||||||
|
|
||||||
if not self.verify_server_certificate:
|
if not self.verify_server_certificate:
|
||||||
LOG.warning(_LW("Verify certificate is not set, using default of "
|
LOG.warning("Verify certificate is not set, using default of "
|
||||||
"False."))
|
"False.")
|
||||||
|
|
||||||
if self.verify_server_certificate and not self.server_certificate_path:
|
if self.verify_server_certificate and not self.server_certificate_path:
|
||||||
msg = _("Path to REST server's certificate must be specified.")
|
msg = _("Path to REST server's certificate must be specified.")
|
||||||
@ -273,10 +267,10 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
new_provisioning_type = storage_type.get(PROVISIONING_KEY)
|
new_provisioning_type = storage_type.get(PROVISIONING_KEY)
|
||||||
old_provisioning_type = storage_type.get(OLD_PROVISIONING_KEY)
|
old_provisioning_type = storage_type.get(OLD_PROVISIONING_KEY)
|
||||||
if new_provisioning_type is None and old_provisioning_type is not None:
|
if new_provisioning_type is None and old_provisioning_type is not None:
|
||||||
LOG.info(_LI("Using sio:provisioning_type for defining "
|
LOG.info("Using sio:provisioning_type for defining "
|
||||||
"thin or thick volume will be deprecated in the "
|
"thin or thick volume will be deprecated in the "
|
||||||
"Ocata release of OpenStack. Please use "
|
"Ocata release of OpenStack. Please use "
|
||||||
"provisioning:type configuration option."))
|
"provisioning:type configuration option.")
|
||||||
provisioning_type = old_provisioning_type
|
provisioning_type = old_provisioning_type
|
||||||
else:
|
else:
|
||||||
provisioning_type = new_provisioning_type
|
provisioning_type = new_provisioning_type
|
||||||
@ -298,11 +292,11 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
if extraspecs_key is not None else None)
|
if extraspecs_key is not None else None)
|
||||||
if extraspecs_limit is not None:
|
if extraspecs_limit is not None:
|
||||||
if qos_limit is not None:
|
if qos_limit is not None:
|
||||||
LOG.warning(_LW("QoS specs are overriding extra_specs."))
|
LOG.warning("QoS specs are overriding extra_specs.")
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("Using extra_specs for defining QoS specs "
|
LOG.info("Using extra_specs for defining QoS specs "
|
||||||
"will be deprecated in the N release "
|
"will be deprecated in the N release "
|
||||||
"of OpenStack. Please use QoS specs."))
|
"of OpenStack. Please use QoS specs.")
|
||||||
return qos_limit if qos_limit is not None else extraspecs_limit
|
return qos_limit if qos_limit is not None else extraspecs_limit
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -341,11 +335,10 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
self._find_protection_domain_name_from_storage_type(storage_type))
|
self._find_protection_domain_name_from_storage_type(storage_type))
|
||||||
provisioning_type = self._find_provisioning_type(storage_type)
|
provisioning_type = self._find_provisioning_type(storage_type)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info("Volume type: %(volume_type)s, "
|
||||||
"Volume type: %(volume_type)s, "
|
|
||||||
"storage pool name: %(pool_name)s, "
|
"storage pool name: %(pool_name)s, "
|
||||||
"storage pool id: %(pool_id)s, protection domain id: "
|
"storage pool id: %(pool_id)s, protection domain id: "
|
||||||
"%(domain_id)s, protection domain name: %(domain_name)s."),
|
"%(domain_id)s, protection domain name: %(domain_name)s.",
|
||||||
{'volume_type': storage_type,
|
{'volume_type': storage_type,
|
||||||
'pool_name': storage_pool_name,
|
'pool_name': storage_pool_name,
|
||||||
'pool_id': storage_pool_id,
|
'pool_id': storage_pool_id,
|
||||||
@ -382,7 +375,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
request = ("https://%(server_ip)s:%(server_port)s"
|
request = ("https://%(server_ip)s:%(server_port)s"
|
||||||
"/api/types/Domain/instances/getByName::"
|
"/api/types/Domain/instances/getByName::"
|
||||||
"%(encoded_domain_name)s") % req_vars
|
"%(encoded_domain_name)s") % req_vars
|
||||||
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
|
LOG.info("ScaleIO get domain id by name request: %s.",
|
||||||
request)
|
request)
|
||||||
r = requests.get(
|
r = requests.get(
|
||||||
request,
|
request,
|
||||||
@ -405,7 +398,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
LOG.info(_LI("Domain id is %s."), domain_id)
|
LOG.info("Domain id is %s.", domain_id)
|
||||||
pool_name = self.storage_pool_name
|
pool_name = self.storage_pool_name
|
||||||
pool_id = self.storage_pool_id
|
pool_id = self.storage_pool_id
|
||||||
if pool_name:
|
if pool_name:
|
||||||
@ -417,7 +410,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
request = ("https://%(server_ip)s:%(server_port)s"
|
request = ("https://%(server_ip)s:%(server_port)s"
|
||||||
"/api/types/Pool/instances/getByName::"
|
"/api/types/Pool/instances/getByName::"
|
||||||
"%(domain_id)s,%(encoded_domain_name)s") % req_vars
|
"%(domain_id)s,%(encoded_domain_name)s") % req_vars
|
||||||
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
|
LOG.info("ScaleIO get pool id by name request: %s.", request)
|
||||||
r = requests.get(
|
r = requests.get(
|
||||||
request,
|
request,
|
||||||
auth=(
|
auth=(
|
||||||
@ -440,7 +433,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
LOG.info(_LI("Pool id is %s."), pool_id)
|
LOG.info("Pool id is %s.", pool_id)
|
||||||
if provisioning_type == 'thin':
|
if provisioning_type == 'thin':
|
||||||
provisioning = "ThinProvisioned"
|
provisioning = "ThinProvisioned"
|
||||||
# Default volume type is thick.
|
# Default volume type is thick.
|
||||||
@ -455,7 +448,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
'volumeType': provisioning,
|
'volumeType': provisioning,
|
||||||
'storagePoolId': pool_id}
|
'storagePoolId': pool_id}
|
||||||
|
|
||||||
LOG.info(_LI("Params for add volume request: %s."), params)
|
LOG.info("Params for add volume request: %s.", params)
|
||||||
r = requests.post(
|
r = requests.post(
|
||||||
"https://" +
|
"https://" +
|
||||||
self.server_ip +
|
self.server_ip +
|
||||||
@ -469,14 +462,14 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
self.server_token),
|
self.server_token),
|
||||||
verify=verify_cert)
|
verify=verify_cert)
|
||||||
response = r.json()
|
response = r.json()
|
||||||
LOG.info(_LI("Add volume response: %s"), response)
|
LOG.info("Add volume response: %s", response)
|
||||||
|
|
||||||
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
|
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
|
||||||
msg = (_("Error creating volume: %s.") % response['message'])
|
msg = (_("Error creating volume: %s.") % response['message'])
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."),
|
LOG.info("Created volume %(volname)s, volume id %(volid)s.",
|
||||||
{'volname': volname, 'volid': volume.id})
|
{'volname': volname, 'volid': volume.id})
|
||||||
|
|
||||||
real_size = int(self._round_to_num_gran(volume.size))
|
real_size = int(self._round_to_num_gran(volume.size))
|
||||||
@ -501,7 +494,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
return self._snapshot_volume(volume_id, snapname)
|
return self._snapshot_volume(volume_id, snapname)
|
||||||
|
|
||||||
def _snapshot_volume(self, vol_id, snapname):
|
def _snapshot_volume(self, vol_id, snapname):
|
||||||
LOG.info(_LI("Snapshot volume %(vol)s into snapshot %(id)s.") %
|
LOG.info("Snapshot volume %(vol)s into snapshot %(id)s.",
|
||||||
{'vol': vol_id, 'id': snapname})
|
{'vol': vol_id, 'id': snapname})
|
||||||
params = {
|
params = {
|
||||||
'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]}
|
'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]}
|
||||||
@ -510,7 +503,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
request = ("https://%(server_ip)s:%(server_port)s"
|
request = ("https://%(server_ip)s:%(server_port)s"
|
||||||
"/api/instances/System/action/snapshotVolumes") % req_vars
|
"/api/instances/System/action/snapshotVolumes") % req_vars
|
||||||
r, response = self._execute_scaleio_post_request(params, request)
|
r, response = self._execute_scaleio_post_request(params, request)
|
||||||
LOG.info(_LI("Snapshot volume response: %s."), response)
|
LOG.info("Snapshot volume response: %s.", response)
|
||||||
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
|
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
|
||||||
msg = (_("Failed creating snapshot for volume %(volname)s: "
|
msg = (_("Failed creating snapshot for volume %(volname)s: "
|
||||||
"%(response)s.") %
|
"%(response)s.") %
|
||||||
@ -537,8 +530,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
def _check_response(self, response, request, is_get_request=True,
|
def _check_response(self, response, request, is_get_request=True,
|
||||||
params=None):
|
params=None):
|
||||||
if response.status_code == 401 or response.status_code == 403:
|
if response.status_code == 401 or response.status_code == 403:
|
||||||
LOG.info(_LI("Token is invalid, going to re-login and get "
|
LOG.info("Token is invalid, going to re-login and get "
|
||||||
"a new one."))
|
"a new one.")
|
||||||
login_request = (
|
login_request = (
|
||||||
"https://" + self.server_ip +
|
"https://" + self.server_ip +
|
||||||
":" + self.server_port + "/api/login")
|
":" + self.server_port + "/api/login")
|
||||||
@ -552,8 +545,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
token = r.json()
|
token = r.json()
|
||||||
self.server_token = token
|
self.server_token = token
|
||||||
# Repeat request with valid token.
|
# Repeat request with valid token.
|
||||||
LOG.info(_LI(
|
LOG.info("Going to perform request again %s with valid token.",
|
||||||
"Going to perform request again %s with valid token."),
|
|
||||||
request)
|
request)
|
||||||
if is_get_request:
|
if is_get_request:
|
||||||
res = requests.get(request,
|
res = requests.get(request,
|
||||||
@ -579,9 +571,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
# exposed by the system
|
# exposed by the system
|
||||||
volume_id = snapshot.provider_id
|
volume_id = snapshot.provider_id
|
||||||
snapname = self._id_to_base64(volume.id)
|
snapname = self._id_to_base64(volume.id)
|
||||||
LOG.info(_LI(
|
LOG.info("ScaleIO create volume from snapshot: snapshot %(snapname)s "
|
||||||
"ScaleIO create volume from snapshot: snapshot %(snapname)s "
|
"to volume %(volname)s.",
|
||||||
"to volume %(volname)s."),
|
|
||||||
{'volname': volume_id,
|
{'volname': volume_id,
|
||||||
'snapname': snapname})
|
'snapname': snapname})
|
||||||
|
|
||||||
@ -608,8 +599,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
|
|
||||||
def _extend_volume(self, volume_id, old_size, new_size):
|
def _extend_volume(self, volume_id, old_size, new_size):
|
||||||
vol_id = volume_id
|
vol_id = volume_id
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"ScaleIO extend volume: volume %(volname)s to size %(new_size)s."),
|
"ScaleIO extend volume: volume %(volname)s to size %(new_size)s.",
|
||||||
{'volname': vol_id,
|
{'volname': vol_id,
|
||||||
'new_size': new_size})
|
'new_size': new_size})
|
||||||
|
|
||||||
@ -619,7 +610,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
request = ("https://%(server_ip)s:%(server_port)s"
|
request = ("https://%(server_ip)s:%(server_port)s"
|
||||||
"/api/instances/Volume::%(vol_id)s"
|
"/api/instances/Volume::%(vol_id)s"
|
||||||
"/action/setVolumeSize") % req_vars
|
"/action/setVolumeSize") % req_vars
|
||||||
LOG.info(_LI("Change volume capacity request: %s."), request)
|
LOG.info("Change volume capacity request: %s.", request)
|
||||||
|
|
||||||
# Round up the volume size so that it is a granularity of 8 GBs
|
# Round up the volume size so that it is a granularity of 8 GBs
|
||||||
# because ScaleIO only supports volumes with a granularity of 8 GBs.
|
# because ScaleIO only supports volumes with a granularity of 8 GBs.
|
||||||
@ -630,8 +621,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
|
|
||||||
round_volume_capacity = self.configuration.sio_round_volume_capacity
|
round_volume_capacity = self.configuration.sio_round_volume_capacity
|
||||||
if not round_volume_capacity and not new_size % 8 == 0:
|
if not round_volume_capacity and not new_size % 8 == 0:
|
||||||
LOG.warning(_LW("ScaleIO only supports volumes with a granularity "
|
LOG.warning("ScaleIO only supports volumes with a granularity "
|
||||||
"of 8 GBs. The new volume size is: %d."),
|
"of 8 GBs. The new volume size is: %d.",
|
||||||
volume_new_size)
|
volume_new_size)
|
||||||
|
|
||||||
params = {'sizeInGB': six.text_type(volume_new_size)}
|
params = {'sizeInGB': six.text_type(volume_new_size)}
|
||||||
@ -658,9 +649,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
"""Creates a cloned volume."""
|
"""Creates a cloned volume."""
|
||||||
volume_id = src_vref['provider_id']
|
volume_id = src_vref['provider_id']
|
||||||
snapname = self._id_to_base64(volume.id)
|
snapname = self._id_to_base64(volume.id)
|
||||||
LOG.info(_LI(
|
LOG.info("ScaleIO create cloned volume: source volume %(src)s to "
|
||||||
"ScaleIO create cloned volume: source volume %(src)s to "
|
"target volume %(tgt)s.",
|
||||||
"target volume %(tgt)s."),
|
|
||||||
{'src': volume_id,
|
{'src': volume_id,
|
||||||
'tgt': snapname})
|
'tgt': snapname})
|
||||||
|
|
||||||
@ -691,9 +681,8 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
request = ("https://%(server_ip)s:%(server_port)s"
|
request = ("https://%(server_ip)s:%(server_port)s"
|
||||||
"/api/instances/Volume::%(vol_id)s"
|
"/api/instances/Volume::%(vol_id)s"
|
||||||
"/action/removeMappedSdc") % req_vars
|
"/action/removeMappedSdc") % req_vars
|
||||||
LOG.info(_LI(
|
LOG.info("Trying to unmap volume from all sdcs"
|
||||||
"Trying to unmap volume from all sdcs"
|
" before deletion: %s.",
|
||||||
" before deletion: %s."),
|
|
||||||
request)
|
request)
|
||||||
r = requests.post(
|
r = requests.post(
|
||||||
request,
|
request,
|
||||||
@ -725,14 +714,12 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
response = r.json()
|
response = r.json()
|
||||||
error_code = response['errorCode']
|
error_code = response['errorCode']
|
||||||
if error_code == VOLUME_NOT_FOUND_ERROR:
|
if error_code == VOLUME_NOT_FOUND_ERROR:
|
||||||
LOG.warning(_LW(
|
LOG.warning("Ignoring error in delete volume %s:"
|
||||||
"Ignoring error in delete volume %s:"
|
" Volume not found.", vol_id)
|
||||||
" Volume not found."), vol_id)
|
|
||||||
elif vol_id is None:
|
elif vol_id is None:
|
||||||
LOG.warning(_LW(
|
LOG.warning("Volume does not have provider_id thus does not "
|
||||||
"Volume does not have provider_id thus does not "
|
|
||||||
"map to a ScaleIO volume. "
|
"map to a ScaleIO volume. "
|
||||||
"Allowing deletion to proceed."))
|
"Allowing deletion to proceed.")
|
||||||
else:
|
else:
|
||||||
msg = (_("Error deleting volume %(vol)s: %(err)s.") %
|
msg = (_("Error deleting volume %(vol)s: %(err)s.") %
|
||||||
{'vol': vol_id,
|
{'vol': vol_id,
|
||||||
@ -743,7 +730,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
def delete_snapshot(self, snapshot):
|
def delete_snapshot(self, snapshot):
|
||||||
"""Deletes a ScaleIO snapshot."""
|
"""Deletes a ScaleIO snapshot."""
|
||||||
snap_id = snapshot.provider_id
|
snap_id = snapshot.provider_id
|
||||||
LOG.info(_LI("ScaleIO delete snapshot."))
|
LOG.info("ScaleIO delete snapshot.")
|
||||||
return self._delete_volume(snap_id)
|
return self._delete_volume(snap_id)
|
||||||
|
|
||||||
def initialize_connection(self, volume, connector, **kwargs):
|
def initialize_connection(self, volume, connector, **kwargs):
|
||||||
@ -762,13 +749,13 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
qos_specs = self._get_volumetype_qos(volume)
|
qos_specs = self._get_volumetype_qos(volume)
|
||||||
storage_type = extra_specs.copy()
|
storage_type = extra_specs.copy()
|
||||||
storage_type.update(qos_specs)
|
storage_type.update(qos_specs)
|
||||||
LOG.info(_LI("Volume type is %s."), storage_type)
|
LOG.info("Volume type is %s.", storage_type)
|
||||||
round_volume_size = self._round_to_num_gran(volume.size)
|
round_volume_size = self._round_to_num_gran(volume.size)
|
||||||
iops_limit = self._get_iops_limit(round_volume_size, storage_type)
|
iops_limit = self._get_iops_limit(round_volume_size, storage_type)
|
||||||
bandwidth_limit = self._get_bandwidth_limit(round_volume_size,
|
bandwidth_limit = self._get_bandwidth_limit(round_volume_size,
|
||||||
storage_type)
|
storage_type)
|
||||||
LOG.info(_LI("iops limit is %s"), iops_limit)
|
LOG.info("iops limit is %s", iops_limit)
|
||||||
LOG.info(_LI("bandwidth limit is %s"), bandwidth_limit)
|
LOG.info("bandwidth limit is %s", bandwidth_limit)
|
||||||
connection_properties['iopsLimit'] = iops_limit
|
connection_properties['iopsLimit'] = iops_limit
|
||||||
connection_properties['bandwidthLimit'] = bandwidth_limit
|
connection_properties['bandwidthLimit'] = bandwidth_limit
|
||||||
return {'driver_volume_type': 'scaleio',
|
return {'driver_volume_type': 'scaleio',
|
||||||
@ -782,10 +769,10 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
max_bandwidth = (self._round_to_num_gran(int(max_bandwidth),
|
max_bandwidth = (self._round_to_num_gran(int(max_bandwidth),
|
||||||
units.Ki))
|
units.Ki))
|
||||||
max_bandwidth = six.text_type(max_bandwidth)
|
max_bandwidth = six.text_type(max_bandwidth)
|
||||||
LOG.info(_LI("max bandwidth is: %s"), max_bandwidth)
|
LOG.info("max bandwidth is: %s", max_bandwidth)
|
||||||
bw_per_gb = self._find_limit(storage_type, QOS_BANDWIDTH_PER_GB,
|
bw_per_gb = self._find_limit(storage_type, QOS_BANDWIDTH_PER_GB,
|
||||||
None)
|
None)
|
||||||
LOG.info(_LI("bandwidth per gb is: %s"), bw_per_gb)
|
LOG.info("bandwidth per gb is: %s", bw_per_gb)
|
||||||
if bw_per_gb is None:
|
if bw_per_gb is None:
|
||||||
return max_bandwidth
|
return max_bandwidth
|
||||||
# Since ScaleIO volumes size is in 8GB granularity
|
# Since ScaleIO volumes size is in 8GB granularity
|
||||||
@ -805,9 +792,9 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
def _get_iops_limit(self, size, storage_type):
|
def _get_iops_limit(self, size, storage_type):
|
||||||
max_iops = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY,
|
max_iops = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY,
|
||||||
IOPS_LIMIT_KEY)
|
IOPS_LIMIT_KEY)
|
||||||
LOG.info(_LI("max iops is: %s"), max_iops)
|
LOG.info("max iops is: %s", max_iops)
|
||||||
iops_per_gb = self._find_limit(storage_type, QOS_IOPS_PER_GB, None)
|
iops_per_gb = self._find_limit(storage_type, QOS_IOPS_PER_GB, None)
|
||||||
LOG.info(_LI("iops per gb is: %s"), iops_per_gb)
|
LOG.info("iops per gb is: %s", iops_per_gb)
|
||||||
try:
|
try:
|
||||||
if iops_per_gb is None:
|
if iops_per_gb is None:
|
||||||
if max_iops is not None:
|
if max_iops is not None:
|
||||||
@ -862,9 +849,9 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
request = ("https://%(server_ip)s:%(server_port)s"
|
request = ("https://%(server_ip)s:%(server_port)s"
|
||||||
"/api/types/Domain/instances/getByName::"
|
"/api/types/Domain/instances/getByName::"
|
||||||
"%(encoded_domain_name)s") % req_vars
|
"%(encoded_domain_name)s") % req_vars
|
||||||
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
|
LOG.info("ScaleIO get domain id by name request: %s.",
|
||||||
request)
|
request)
|
||||||
LOG.info(_LI("username: %(username)s, verify_cert: %(verify)s."),
|
LOG.info("username: %(username)s, verify_cert: %(verify)s.",
|
||||||
{'username': self.server_username,
|
{'username': self.server_username,
|
||||||
'verify': verify_cert})
|
'verify': verify_cert})
|
||||||
r = requests.get(
|
r = requests.get(
|
||||||
@ -874,7 +861,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
self.server_token),
|
self.server_token),
|
||||||
verify=verify_cert)
|
verify=verify_cert)
|
||||||
r = self._check_response(r, request)
|
r = self._check_response(r, request)
|
||||||
LOG.info(_LI("Get domain by name response: %s"), r.text)
|
LOG.info("Get domain by name response: %s", r.text)
|
||||||
domain_id = r.json()
|
domain_id = r.json()
|
||||||
if not domain_id:
|
if not domain_id:
|
||||||
msg = (_("Domain with name %s wasn't found.")
|
msg = (_("Domain with name %s wasn't found.")
|
||||||
@ -888,7 +875,7 @@ class ScaleIODriver(driver.VolumeDriver):
|
|||||||
'err': domain_id['message']})
|
'err': domain_id['message']})
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
LOG.info(_LI("Domain id is %s."), domain_id)
|
LOG.info("Domain id is %s.", domain_id)
|
||||||
|
|
||||||
# Get pool id from name.
|
# Get pool id from name.
|
||||||
encoded_pool_name = urllib.parse.quote(pool_name, '')
|
encoded_pool_name = urllib.parse.quote(pool_name, '')
|
||||||
@ -899,7 +886,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_pool_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
LOG.info("ScaleIO get pool id by name request: %s.", request)
r = requests.get(
request,
auth=(
@ -921,7 +908,7 @@ class ScaleIODriver(driver.VolumeDriver):
'err': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
LOG.info("Pool id is %s.", pool_id)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
request = ("https://%(server_ip)s:%(server_port)s"
@ -941,7 +928,7 @@ class ScaleIODriver(driver.VolumeDriver):
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Query capacity stats response: %s."), response)
LOG.info("Query capacity stats response: %s.", response)
for res in response.values():
# Divide by two because ScaleIO creates a copy for each volume
total_capacity_kb = (
@ -956,10 +943,9 @@ class ScaleIODriver(driver.VolumeDriver):
provisioned_capacity = (
((res['thickCapacityInUseInKb'] +
res['thinCapacityAllocatedInKm']) / 2) / units.Mi)
LOG.info(_LI(
"free capacity of pool %(pool)s is: %(free)s, "
LOG.info("Free capacity of pool %(pool)s is: %(free)s, "
"total capacity: %(total)s, "
"provisioned capacity: %(prov)s"),
"provisioned capacity: %(prov)s",
{'pool': pool_name,
'free': free_capacity_gb,
'total': total_capacity_gb,
@ -983,15 +969,14 @@ class ScaleIODriver(driver.VolumeDriver):

stats['total_capacity_gb'] = total_capacity
stats['free_capacity_gb'] = free_capacity
LOG.info(_LI(
"Free capacity for backend is: %(free)s, total capacity: "
"%(total)s."),
LOG.info("Free capacity for backend is: %(free)s, total capacity: "
"%(total)s.",
{'free': free_capacity,
'total': total_capacity})

stats['pools'] = pools

LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"])
LOG.info("Backend name is %s.", stats["volume_backend_name"])

self._stats = stats

@ -1046,7 +1031,7 @@ class ScaleIODriver(driver.VolumeDriver):

def _sio_detach_volume(self, volume):
"""Call the connector.disconnect() """
LOG.info(_LI("Calling os-brick to detach ScaleIO volume."))
LOG.info("Calling os-brick to detach ScaleIO volume.")
connection_properties = dict(self.connection_properties)
connection_properties['scaleIO_volname'] = self._id_to_base64(
volume.id)
@ -1055,9 +1040,8 @@ class ScaleIODriver(driver.VolumeDriver):

def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.info(_LI(
"ScaleIO copy_image_to_volume volume: %(vol)s image service: "
"%(service)s image id: %(id)s."),
LOG.info("ScaleIO copy_image_to_volume volume: %(vol)s image service: "
"%(service)s image id: %(id)s.",
{'vol': volume,
'service': six.text_type(image_service),
'id': six.text_type(image_id)})
@ -1075,9 +1059,8 @@ class ScaleIODriver(driver.VolumeDriver):

def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.info(_LI(
"ScaleIO copy_volume_to_image volume: %(vol)s image service: "
"%(service)s image meta: %(meta)s."),
LOG.info("ScaleIO copy_volume_to_image volume: %(vol)s image service: "
"%(service)s image meta: %(meta)s.",
{'vol': volume,
'service': six.text_type(image_service),
'meta': six.text_type(image_meta)})
@ -1109,8 +1092,8 @@ class ScaleIODriver(driver.VolumeDriver):
current_name = new_volume['id']
new_name = volume['id']
vol_id = new_volume['provider_id']
LOG.info(_LI("Renaming %(id)s from %(current_name)s to "
"%(new_name)s."),
LOG.info("Renaming %(id)s from %(current_name)s to "
"%(new_name)s.",
{'id': vol_id, 'current_name': current_name,
'new_name': new_name})

@ -1134,7 +1117,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(id)s/action/setVolumeName" %
req_vars)
LOG.info(_LI("ScaleIO rename volume request: %s."), request)
LOG.info("ScaleIO rename volume request: %s.", request)

params = {'newName': new_name}
r = requests.post(
@ -1153,8 +1136,8 @@ class ScaleIODriver(driver.VolumeDriver):
if ((error_code == VOLUME_NOT_FOUND_ERROR or
error_code == OLD_VOLUME_NOT_FOUND_ERROR or
error_code == ILLEGAL_SYNTAX)):
LOG.info(_LI("Ignoring renaming action because the volume "
"%(vol)s is not a ScaleIO volume."),
LOG.info("Ignoring renaming action because the volume "
"%(vol)s is not a ScaleIO volume.",
{'vol': vol_id})
else:
msg = (_("Error renaming volume %(vol)s: %(err)s.") %
@ -1162,14 +1145,14 @@ class ScaleIODriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI("ScaleIO volume %(vol)s was renamed to "
"%(new_name)s."),
LOG.info("ScaleIO volume %(vol)s was renamed to "
"%(new_name)s.",
{'vol': vol_id, 'new_name': new_name})

def _query_scaleio_volume(self, volume, existing_ref):
request = self._create_scaleio_get_volume_request(volume, existing_ref)
r, response = self._execute_scaleio_get_request(request)
LOG.info(_LI("Get Volume response: %(res)s"),
LOG.info("Get Volume response: %(res)s",
{'res': response})
self._manage_existing_check_legal_response(r, existing_ref)
return response
@ -1258,7 +1241,7 @@ class ScaleIODriver(driver.VolumeDriver):
'id': vol_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(id)s" % req_vars)
LOG.info(_LI("ScaleIO get volume by id request: %s."), request)
LOG.info("ScaleIO get volume by id request: %s.", request)
return request

@staticmethod
@ -1286,7 +1269,7 @@ class ScaleIODriver(driver.VolumeDriver):
ScaleIO won't create CG until cg-snapshot creation,
db will maintain the volumes and CG relationship.
"""
LOG.info(_LI("Creating Consistency Group"))
LOG.info("Creating Consistency Group")
model_update = {'status': 'available'}
return model_update

@ -1295,7 +1278,7 @@ class ScaleIODriver(driver.VolumeDriver):

ScaleIO will delete the volumes of the CG.
"""
LOG.info(_LI("Deleting Consistency Group"))
LOG.info("Deleting Consistency Group")
model_update = {'status': 'deleted'}
error_statuses = ['error', 'error_deleting']
volumes_model_update = []
@ -1311,8 +1294,8 @@ class ScaleIODriver(driver.VolumeDriver):
volumes_model_update.append(update_item)
if model_update['status'] not in error_statuses:
model_update['status'] = 'error_deleting'
LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s."),
LOG.error("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s.",
{'vol': volume['name'], 'exception': err})
return model_update, volumes_model_update

@ -1323,7 +1306,7 @@ class ScaleIODriver(driver.VolumeDriver):
'snapshotName': self._id_to_base64(snapshot['id'])}
snapshot_defs = list(map(get_scaleio_snapshot_params, snapshots))
r, response = self._snapshot_volume_group(snapshot_defs)
LOG.info(_LI("Snapshot volume response: %s."), response)
LOG.info("Snapshot volume response: %s.", response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for group: "
"%(response)s.") %
@ -1356,9 +1339,9 @@ class ScaleIODriver(driver.VolumeDriver):
snapshot_model_update.append(update_item)
if model_update['status'] not in error_statuses:
model_update['status'] = 'error_deleting'
LOG.error(_LE("Failed to delete the snapshot %(snap)s "
LOG.error("Failed to delete the snapshot %(snap)s "
"of cgsnapshot: %(cgsnapshot_id)s. "
"Exception: %(exception)s."),
"Exception: %(exception)s.",
{'snap': snapshot['name'],
'exception': err,
'cgsnapshot_id': cgsnapshot.id})
@ -1381,7 +1364,7 @@ class ScaleIODriver(driver.VolumeDriver):
source_vols,
volumes)
r, response = self._snapshot_volume_group(list(snapshot_defs))
LOG.info(_LI("Snapshot volume response: %s."), response)
LOG.info("Snapshot volume response: %s.", response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for group: "
"%(response)s.") %
@ -1407,7 +1390,7 @@ class ScaleIODriver(driver.VolumeDriver):
return None, None, None

def _snapshot_volume_group(self, snapshot_defs):
LOG.info(_LI("ScaleIO snapshot group of volumes"))
LOG.info("ScaleIO snapshot group of volumes")
params = {'snapshotDefs': snapshot_defs}
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
@ -21,8 +21,8 @@ from oslo_log import log as logging
from oslo_utils import excutils

from cinder import exception
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.dell_emc.unity import client
from cinder.volume.drivers.dell_emc.unity import utils
from cinder.volume import utils as vol_utils
@ -111,21 +111,21 @@ class CommonAdapter(object):
matched, _ignored, unmatched_whitelist = utils.match_any(all_ports.id,
whitelist)
if not matched:
LOG.error(_LE('No matched ports filtered by all patterns: %s'),
LOG.error('No matched ports filtered by all patterns: %s',
whitelist)
raise exception.InvalidConfigurationValue(
option='%s.unity_io_ports' % self.config.config_group,
value=self.config.unity_io_ports)

if unmatched_whitelist:
LOG.error(_LE('No matched ports filtered by below patterns: %s'),
LOG.error('No matched ports filtered by below patterns: %s',
unmatched_whitelist)
raise exception.InvalidConfigurationValue(
option='%s.unity_io_ports' % self.config.config_group,
value=self.config.unity_io_ports)

LOG.info(_LI('These ports %(matched)s will be used based on '
'the option unity_io_ports: %(config)s'),
LOG.info('These ports %(matched)s will be used based on '
'the option unity_io_ports: %(config)s',
{'matched': matched,
'config': self.config.unity_io_ports})
return matched
@ -174,8 +174,8 @@ class CommonAdapter(object):
qos_specs = utils.get_backend_qos_specs(volume)
limit_policy = self.client.get_io_limit_policy(qos_specs)

LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
'Pool: %(pool)s Qos: %(qos)s.'),
LOG.info('Create Volume: %(volume)s Size: %(size)s '
'Pool: %(pool)s Qos: %(qos)s.',
{'volume': volume_name,
'size': volume_size,
'pool': pool.name,
@ -193,8 +193,8 @@ class CommonAdapter(object):
def delete_volume(self, volume):
lun_id = self.get_lun_id(volume)
if lun_id is None:
LOG.info(_LI('Backend LUN not found, skipping the deletion. '
'Volume: %(volume_name)s.'),
LOG.info('Backend LUN not found, skipping the deletion. '
'Volume: %(volume_name)s.',
{'volume_name': volume.name})
else:
self.client.delete_lun(lun_id)
@ -457,8 +457,8 @@ class CommonAdapter(object):
except Exception:
with excutils.save_and_reraise_exception():
utils.ignore_exception(self.delete_volume, volume)
LOG.error(_LE('Failed to create cloned volume: %(vol_id)s, '
'from source unity snapshot: %(snap_name)s. '),
LOG.error('Failed to create cloned volume: %(vol_id)s, '
'from source unity snapshot: %(snap_name)s.',
{'vol_id': volume.id, 'snap_name': snap.name})

return model_update
@ -25,7 +25,7 @@ else:
storops_ex = None

from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.unity import utils


@ -98,13 +98,13 @@ class UnityClient(object):
lun = None
if lun_id is None and name is None:
LOG.warning(
_LW("Both lun_id and name are None to get LUN. Return None."))
"Both lun_id and name are None to get LUN. Return None.")
else:
try:
lun = self.system.get_lun(_id=lun_id, name=name)
except storops_ex.UnityResourceNotFoundError:
LOG.warning(
_LW("LUN id=%(id)s, name=%(name)s doesn't exist."),
"LUN id=%(id)s, name=%(name)s doesn't exist.",
{'id': lun_id, 'name': name})
return lun

@ -159,16 +159,16 @@ class UnityClient(object):
'err': err})
except storops_ex.UnityDeleteAttachedSnapError as err:
with excutils.save_and_reraise_exception():
LOG.warning(_LW("Failed to delete snapshot %(snap_name)s "
"which is in use. Message: %(err)s"),
LOG.warning("Failed to delete snapshot %(snap_name)s "
"which is in use. Message: %(err)s",
{'snap_name': snap.name, 'err': err})

def get_snap(self, name=None):
try:
return self.system.get_snap(name=name)
except storops_ex.UnityResourceNotFoundError as err:
msg = _LW("Snapshot %(name)s doesn't exist. Message: %(err)s")
LOG.warning(msg, {'name': name, 'err': err})
LOG.warning("Snapshot %(name)s doesn't exist. Message: %(err)s",
{'name': name, 'err': err})
return None

def create_host(self, name, uids):
@ -24,7 +24,7 @@ from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as zm_utils
@ -70,11 +70,11 @@ def extract_provider_location(provider_location, key):
if len(fields) == 2 and fields[0] == key:
return fields[1]
else:
msg = _LW('"%(key)s" is not found in provider '
'location "%(location)s."')
LOG.warning(msg, {'key': key, 'location': provider_location})
LOG.warning('"%(key)s" is not found in provider '
'location "%(location)s."',
{'key': key, 'location': provider_location})
else:
LOG.warning(_LW('Empty provider location received.'))
LOG.warning('Empty provider location received.')


def byte_to_gib(byte):
@ -186,9 +186,9 @@ def ignore_exception(func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as ex:
LOG.warning(_LW('Error occurred but ignored. Function: %(func_name)s, '
LOG.warning('Error occurred but ignored. Function: %(func_name)s, '
'args: %(args)s, kwargs: %(kwargs)s, '
'exception: %(ex)s.'),
'exception: %(ex)s.',
{'func_name': func, 'args': args,
'kwargs': kwargs, 'ex': ex})

@ -25,11 +25,11 @@ import six
import uuid

from cinder import exception
from cinder import utils as cinder_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects.consistencygroup import ConsistencyGroup
from cinder.i18n import _
import cinder.objects.consistencygroup as cg_obj
from cinder.objects import fields
from cinder.objects.group import Group
import cinder.objects.group as group_obj
from cinder import utils as cinder_utils
from cinder.volume.drivers.dell_emc.vmax import fast
from cinder.volume.drivers.dell_emc.vmax import https
from cinder.volume.drivers.dell_emc.vmax import masking
@ -138,9 +138,8 @@ class VMAXCommon(object):
active_backend_id=None):

if not pywbemAvailable:
LOG.info(_LI(
"Module PyWBEM not installed. "
"Install PyWBEM using the python-pywbem package."))
LOG.info("Module PyWBEM not installed. Install PyWBEM using the "
"python-pywbem package.")

self.protocol = prtcl
self.configuration = configuration
@ -221,9 +220,9 @@ class VMAXCommon(object):
LOG.debug("The replication configuration is %(rep_config)s.",
{'rep_config': self.rep_config})
elif self.rep_devices and len(self.rep_devices) > 1:
LOG.error(_LE("More than one replication target is configured. "
LOG.error("More than one replication target is configured. "
"EMC VMAX only suppports a single replication "
"target. Replication will not be enabled."))
"target. Replication will not be enabled.")

def _get_slo_workload_combinations(self, arrayInfoList):
"""Method to query the array for SLO and Workloads.
@ -356,9 +355,9 @@ class VMAXCommon(object):
volumeName,
extraSpecs)

LOG.info(_LI("Leaving create_volume: %(volumeName)s "
LOG.info("Leaving create_volume: %(volumeName)s "
"Return code: %(rc)lu "
"volume dict: %(name)s."),
"volume dict: %(name)s.",
{'volumeName': volumeName,
'rc': rc,
'name': volumeDict})
@ -449,12 +448,12 @@ class VMAXCommon(object):

:param volume: volume Object
"""
LOG.info(_LI("Deleting Volume: %(volume)s"),
LOG.info("Deleting Volume: %(volume)s",
{'volume': volume['name']})

rc, volumeName = self._delete_volume(volume)
LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: "
"%(rc)lu."),
LOG.info("Leaving delete_volume: %(volumename)s Return code: "
"%(rc)lu.",
{'volumename': volumeName,
'rc': rc})

@ -476,7 +475,7 @@ class VMAXCommon(object):
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
"""
LOG.info(_LI("Delete Snapshot: %(snapshotName)s."),
LOG.info("Delete Snapshot: %(snapshotName)s.",
{'snapshotName': snapshot['name']})
self._delete_snapshot(snapshot, volume['host'])

@ -516,12 +515,12 @@ class VMAXCommon(object):
extraSpecs = self._get_replication_extraSpecs(
extraSpecs, self.rep_config)
volumename = volume['name']
LOG.info(_LI("Unmap volume: %(volume)s."),
LOG.info("Unmap volume: %(volume)s.",
{'volume': volumename})

device_info = self.find_device_number(volume, connector['host'])
if 'hostlunid' not in device_info:
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
LOG.info("Volume %s is not mapped. No volume to unmap.",
volumename)
return

@ -584,7 +583,7 @@ class VMAXCommon(object):
is_multipath = connector.get('multipath', False)

volumeName = volume['name']
LOG.info(_LI("Initialize connection: %(volume)s."),
LOG.info("Initialize connection: %(volume)s.",
{'volume': volumeName})
self.conn = self._get_ecom_connection()
deviceInfoDict = self._wrap_find_device_number(
@ -603,8 +602,8 @@ class VMAXCommon(object):
# the state as is.

deviceNumber = deviceInfoDict['hostlunid']
LOG.info(_LI("Volume %(volume)s is already mapped. "
"The device number is %(deviceNumber)s."),
LOG.info("Volume %(volume)s is already mapped. "
"The device number is %(deviceNumber)s.",
{'volume': volumeName,
'deviceNumber': deviceNumber})
# Special case, we still need to get the iscsi ip address.
@ -663,7 +662,7 @@ class VMAXCommon(object):
if 'hostlunid' not in deviceInfoDict:
# Did not successfully attach to host,
# so a rollback for FAST is required.
LOG.error(_LE("Error Attaching volume %(vol)s."),
LOG.error("Error Attaching volume %(vol)s.",
{'vol': volumeName})
if ((rollbackDict['fastPolicyName'] is not None) or
(rollbackDict['isV3'] is not None)):
@ -754,7 +753,7 @@ class VMAXCommon(object):
:params connector: the connector Object
"""
volumename = volume['name']
LOG.info(_LI("Terminate connection: %(volume)s."),
LOG.info("Terminate connection: %(volume)s.",
{'volume': volumename})

self._unmap_lun(volume, connector)
@ -1020,11 +1019,11 @@ class VMAXCommon(object):
provisionedManagedSpaceGbs, array_reserve_percent, wlpEnabled) = (
self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo))

LOG.info(_LI(
LOG.info(
"Capacity stats for SRP pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu, "
"provisioned_capacity_gb=%(provisioned_capacity_gb)lu"),
"provisioned_capacity_gb=%(provisioned_capacity_gb)lu",
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': totalManagedSpaceGbs,
@ -1055,7 +1054,7 @@ class VMAXCommon(object):

volumeName = volume['name']
volumeStatus = volume['status']
LOG.info(_LI("Migrating using retype Volume: %(volume)s."),
LOG.info("Migrating using retype Volume: %(volume)s.",
{'volume': volumeName})

extraSpecs = self._initial_setup(volume)
@ -1063,17 +1062,17 @@ class VMAXCommon(object):

volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array. "
"No volume to migrate using retype."),
LOG.error("Volume %(name)s not found on the array. "
"No volume to migrate using retype.",
{'name': volumeName})
return False

if extraSpecs[ISV3]:
if self.utils.is_replication_enabled(extraSpecs):
LOG.error(_LE("Volume %(name)s is replicated - "
LOG.error("Volume %(name)s is replicated - "
"Replicated volumes are not eligible for "
"storage assisted retype. Host assisted "
"retype is supported."),
"retype is supported.",
{'name': volumeName})
return False

@ -1097,12 +1096,12 @@ class VMAXCommon(object):
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
LOG.warning(_LW("The VMAX plugin only supports Retype. "
LOG.warning("The VMAX plugin only supports Retype. "
"If a pool based migration is necessary "
"this will happen on a Retype "
"From the command line: "
"cinder --os-volume-api-version 2 retype <volumeId> "
"<volumeType> --migration-policy on-demand"))
"<volumeType> --migration-policy on-demand")
return True, {}

def _migrate_volume(
@ -1134,11 +1133,11 @@ class VMAXCommon(object):
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful.
LOG.warning(_LW(
LOG.warning(
"Failed to migrate: %(volumeName)s from "
"default source storage group "
"for FAST policy: %(sourceFastPolicyName)s. "
"Attempting cleanup... "),
"Attempting cleanup... ",
{'volumeName': volumeName,
'sourceFastPolicyName': sourceFastPolicyName})
if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
@ -1162,9 +1161,9 @@ class VMAXCommon(object):
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
LOG.warning(_LW(
LOG.warning(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
"original pool %(sourcePoolInstanceName)s.",
{'volumeName': volumeName,
'sourcePoolInstanceName': sourcePoolInstanceName})
self._migrate_rollback(
@ -1194,7 +1193,7 @@ class VMAXCommon(object):
:param extraSpecs: extra specifications
"""

LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
LOG.warning("_migrate_rollback on : %(volumeName)s.",
{'volumeName': volumeName})

storageRelocationService = self.utils.find_storage_relocation_service(
@ -1205,10 +1204,10 @@ class VMAXCommon(object):
conn, storageRelocationService, volumeInstance.path,
sourcePoolInstanceName, extraSpecs)
except Exception:
LOG.error(_LE(
LOG.error(
"Failed to return volume %(volumeName)s to "
"original storage pool. Please contact your system "
"administrator to return it to the correct location."),
"administrator to return it to the correct location.",
{'volumeName': volumeName})

if sourceFastPolicyName is not None:
@ -1230,7 +1229,7 @@ class VMAXCommon(object):
:returns: boolean -- True/False
"""

LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
LOG.warning("_migrate_cleanup on : %(volumeName)s.",
{'volumeName': volumeName})
return_to_default = True
controllerConfigurationService = (
@ -1279,9 +1278,9 @@ class VMAXCommon(object):
:returns: boolean -- True/False
"""
falseRet = False
LOG.info(_LI(
LOG.info(
"Adding volume: %(volumeName)s to default storage group "
"for FAST policy: %(fastPolicyName)s."),
"for FAST policy: %(fastPolicyName)s.",
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})

@ -1294,9 +1293,9 @@ class VMAXCommon(object):
self.conn, controllerConfigurationService,
targetFastPolicyName, volumeInstance, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Unable to create or get default storage group for FAST policy"
": %(fastPolicyName)s."),
": %(fastPolicyName)s.",
{'fastPolicyName': targetFastPolicyName})

return falseRet
@ -1306,9 +1305,9 @@ class VMAXCommon(object):
self.conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Failed to verify that volume was added to storage group for "
"FAST policy: %(fastPolicyName)s."),
"FAST policy: %(fastPolicyName)s.",
{'fastPolicyName': targetFastPolicyName})
return falseRet

@ -1348,9 +1347,9 @@ class VMAXCommon(object):
targetPoolInstanceName = self.utils.get_pool_by_name(
self.conn, targetPoolName, storageSystemName)
if targetPoolInstanceName is None:
LOG.error(_LE(
LOG.error(
"Error finding target pool instance name for pool: "
"%(targetPoolName)s."),
"%(targetPoolName)s.",
{'targetPoolName': targetPoolName})
return falseRet
try:
@ -1360,9 +1359,9 @@ class VMAXCommon(object):
except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
LOG.exception(_LE(
LOG.exception(
"Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s."),
"to target pool %(targetPoolName)s.",
{'volumename': volumeName,
'targetPoolName': targetPoolName})
return falseRet
@ -1375,9 +1374,9 @@ class VMAXCommon(object):
if (foundPoolInstanceName is None or
(foundPoolInstanceName['InstanceID'] !=
targetPoolInstanceName['InstanceID'])):
LOG.error(_LE(
LOG.error(
"Volume : %(volumeName)s. was not successfully migrated to "
"target pool %(targetPoolName)s."),
"target pool %(targetPoolName)s.",
{'volumeName': volumeName,
'targetPoolName': targetPoolName})
return falseRet
@ -1427,10 +1426,10 @@ class VMAXCommon(object):
raise exception.VolumeBackendAPIException(data=exceptionMessage)

if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"The volume: %(volumename)s "
"was not first part of the default storage "
"group for FAST policy %(fastPolicyName)s."),
"group for FAST policy %(fastPolicyName)s.",
{'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})

@ -1455,10 +1454,10 @@ class VMAXCommon(object):
conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
LOG.error(
"Failed to add %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s."),
"%(fastPolicyName)s.",
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})

@ -1483,7 +1482,7 @@ class VMAXCommon(object):
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE('Error getting array, pool, SLO and workload.'))
LOG.error('Error getting array, pool, SLO and workload.')
return falseRet
info = host['capabilities']['location_info']

@ -1496,24 +1495,24 @@ class VMAXCommon(object):
targetSlo = infoDetail[2]
targetWorkload = infoDetail[3]
except KeyError:
LOG.error(_LE("Error parsing array, pool, SLO and workload."))
LOG.error("Error parsing array, pool, SLO and workload.")

if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
LOG.error(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s "
"skipping storage-assisted migration."),
"skipping storage-assisted migration.",
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet

if targetPoolName not in sourcePoolName:
LOG.error(_LE(
LOG.error(
"Only SLO/workload migration within the same SRP Pool "
"is supported in this version "
"The source pool : %(sourcePoolName)s does not "
"match the target array: %(targetPoolName)s. "
"Skipping storage-assisted migration."),
"Skipping storage-assisted migration.",
{'sourcePoolName': sourcePoolName,
'targetPoolName': targetPoolName})
return falseRet
@ -1522,9 +1521,9 @@ class VMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstanceName, sgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"Volume: %(volumeName)s is not currently "
"belonging to any storage group."),
"belonging to any storage group.",
{'volumeName': volumeName})

else:
@ -1539,10 +1538,10 @@ class VMAXCommon(object):
# Check if migration is from compression to non compression
# of vice versa
if not doChangeCompression:
LOG.error(_LE(
LOG.error(
"No action required. Volume: %(volumeName)s is "
"already part of slo/workload combination: "
"%(targetCombination)s."),
"%(targetCombination)s.",
{'volumeName': volumeName,
'targetCombination': targetCombination})
return falseRet
@ -1566,7 +1565,7 @@ class VMAXCommon(object):
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE("Error getting target pool name and array."))
LOG.error("Error getting target pool name and array.")
return falseRet
info = host['capabilities']['location_info']

@ -1578,14 +1577,14 @@ class VMAXCommon(object):
targetPoolName = infoDetail[1]
targetFastPolicy = infoDetail[2]
except KeyError:
LOG.error(_LE(
"Error parsing target pool name, array, and fast policy."))
LOG.error(
"Error parsing target pool name, array, and fast policy.")

if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
LOG.error(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s, "
"skipping storage-assisted migration."),
"skipping storage-assisted migration.",
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
@ -1597,19 +1596,19 @@ class VMAXCommon(object):
assocPoolInstance = self.conn.GetInstance(
assocPoolInstanceName)
if assocPoolInstance['ElementName'] == targetPoolName:
LOG.error(_LE(
LOG.error(
"No action required. Volume: %(volumeName)s is "
"already part of pool: %(pool)s."),
"already part of pool: %(pool)s.",
{'volumeName': volumeName,
'pool': targetPoolName})
return falseRet

LOG.info(_LI("Volume status is: %s."), volumeStatus)
LOG.info("Volume status is: %s.", volumeStatus)
if (host['capabilities']['storage_protocol'] != self.protocol and
(volumeStatus != 'available' and volumeStatus != 'retyping')):
LOG.error(_LE(
LOG.error(
"Only available volumes can be migrated between "
"different protocols."))
"different protocols.")
return falseRet

return (True, targetPoolName, targetFastPolicy)
@ -1799,7 +1798,7 @@ class VMAXCommon(object):
foundVolumeinstance['ElementName']):
foundVolumeinstance = None
except Exception as e:
LOG.info(_LI("Exception in retrieving volume: %(e)s."),
LOG.info("Exception in retrieving volume: %(e)s.",
{'e': e})
foundVolumeinstance = None

@ -1944,9 +1943,9 @@ class VMAXCommon(object):
if not data:
if len(maskedvols) > 0:
data = maskedvols[0]
LOG.warning(_LW(
LOG.warning(
"Volume is masked but not to host %(host)s as is "
"expected. Assuming live migration."),
"expected. Assuming live migration.",
{'host': hoststr})

LOG.debug("Device info: %(data)s.", {'data': data})
@ -1982,15 +1981,15 @@ class VMAXCommon(object):
self.utils.get_target_endpoints(
self.conn, hardwareIdInstance))
if not targetEndpoints:
LOG.warning(_LW(
LOG.warning(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
"%(instance)s.",
{'instance': hardwareIdInstance})
continue
except Exception:
LOG.warning(_LW(
LOG.warning(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
"%(instance)s.",
{'instance': hardwareIdInstance}, exc_info=True)
continue

@ -2447,9 +2446,9 @@ class VMAXCommon(object):
volumeInstance.path, appendVolumeInstanceName, compositeType,
extraSpecs)
else:
LOG.error(_LE(
LOG.error(
"Unable to determine whether %(volumeName)s is "
"composite or not."),
"composite or not.",
{'volumeName': volumeName})
raise

@ -2497,9 +2496,9 @@ class VMAXCommon(object):
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']

LOG.info(_LI(
LOG.info(
"Create a replica from Volume: Clone Volume: %(cloneName)s "
"Source Volume: %(sourceName)s."),
"Source Volume: %(sourceName)s.",
{'cloneName': cloneName,
'sourceName': sourceName})

@ -2555,8 +2554,8 @@ class VMAXCommon(object):
self.conn, sourceInstance))

if cloneVolume['size'] != old_size_gbs:
LOG.info(_LI("Extending clone %(cloneName)s to "
"%(newSize)d GBs"),
LOG.info("Extending clone %(cloneName)s to "
"%(newSize)d GBs",
{'cloneName': cloneName,
'newSize': cloneVolume['size']})
cloneInstance = self.utils.find_volume_instance(
@ -2638,9 +2637,9 @@ class VMAXCommon(object):

volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE(
LOG.error(
"Volume %(name)s not found on the array. "
"No volume to delete."),
"No volume to delete.",
{'name': volumeName})
return errorRet

@ -2683,10 +2682,10 @@ class VMAXCommon(object):
self.masking.get_associated_masking_groups_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceNames:
LOG.warning(_LW(
LOG.warning(
"Pre check for deletion. "
"Volume: %(volumeName)s is part of a storage group. "
"Attempting removal from %(storageGroupInstanceNames)s."),
"Attempting removal from %(storageGroupInstanceNames)s.",
{'volumeName': volumeName,
'storageGroupInstanceNames': storageGroupInstanceNames})
for storageGroupInstanceName in storageGroupInstanceNames:
@ -2829,8 +2828,8 @@ class VMAXCommon(object):
|
|||||||
|
|
||||||
# Delete the target device.
|
# Delete the target device.
|
||||||
rc, snapshotname = self._delete_volume(snapshot, True, host)
|
rc, snapshotname = self._delete_volume(snapshot, True, host)
|
||||||
LOG.info(_LI("Leaving delete_snapshot: %(ssname)s Return code: "
|
LOG.info("Leaving delete_snapshot: %(ssname)s Return code: "
|
||||||
"%(rc)lu."),
|
"%(rc)lu.",
|
||||||
{'ssname': snapshotname,
|
{'ssname': snapshotname,
|
||||||
'rc': rc})
|
'rc': rc})
|
||||||
|
|
||||||
@ -2842,7 +2841,7 @@ class VMAXCommon(object):
|
|||||||
:returns: dict -- modelUpdate = {'status': 'available'}
|
:returns: dict -- modelUpdate = {'status': 'available'}
|
||||||
:raises: VolumeBackendAPIException
|
:raises: VolumeBackendAPIException
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Create Consistency Group: %(group)s."),
|
LOG.info("Create Consistency Group: %(group)s.",
|
||||||
{'group': group['id']})
|
{'group': group['id']})
|
||||||
|
|
||||||
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
|
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
|
||||||
@ -2876,7 +2875,7 @@ class VMAXCommon(object):
|
|||||||
:returns: list -- list of volume objects
|
:returns: list -- list of volume objects
|
||||||
:raises: VolumeBackendAPIException
|
:raises: VolumeBackendAPIException
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Delete Consistency Group: %(group)s."),
|
LOG.info("Delete Consistency Group: %(group)s.",
|
||||||
{'group': group['id']})
|
{'group': group['id']})
|
||||||
|
|
||||||
modelUpdate = {}
|
modelUpdate = {}
|
||||||
@ -2894,7 +2893,7 @@ class VMAXCommon(object):
|
|||||||
cgInstanceName, cgName = self._find_consistency_group(
|
cgInstanceName, cgName = self._find_consistency_group(
|
||||||
replicationService, six.text_type(group['id']))
|
replicationService, six.text_type(group['id']))
|
||||||
if cgInstanceName is None:
|
if cgInstanceName is None:
|
||||||
LOG.error(_LE("Cannot find CG group %(cgName)s."),
|
LOG.error("Cannot find CG group %(cgName)s.",
|
||||||
{'cgName': six.text_type(group['id'])})
|
{'cgName': six.text_type(group['id'])})
|
||||||
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
|
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
|
||||||
volumes_model_update = self.utils.get_volume_model_updates(
|
volumes_model_update = self.utils.get_volume_model_updates(
|
||||||
@ -2980,9 +2979,9 @@ class VMAXCommon(object):
|
|||||||
|
|
||||||
snapshots_model_update = []
|
snapshots_model_update = []
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"Create snapshot for Consistency Group %(cgId)s "
|
"Create snapshot for Consistency Group %(cgId)s "
|
||||||
"cgsnapshotID: %(cgsnapshot)s."),
|
"cgsnapshotID: %(cgsnapshot)s.",
|
||||||
{'cgsnapshot': cgsnapshot['id'],
|
{'cgsnapshot': cgsnapshot['id'],
|
||||||
'cgId': cgsnapshot['consistencygroup_id']})
|
'cgId': cgsnapshot['consistencygroup_id']})
|
||||||
|
|
||||||
@ -3011,7 +3010,7 @@ class VMAXCommon(object):
|
|||||||
interval_retries_dict)
|
interval_retries_dict)
|
||||||
targetCgInstanceName, targetCgName = self._find_consistency_group(
|
targetCgInstanceName, targetCgName = self._find_consistency_group(
|
||||||
replicationService, cgsnapshot['id'])
|
replicationService, cgsnapshot['id'])
|
||||||
LOG.info(_LI("Create target consistency group %(targetCg)s."),
|
LOG.info("Create target consistency group %(targetCg)s.",
|
||||||
{'targetCg': targetCgInstanceName})
|
{'targetCg': targetCgInstanceName})
|
||||||
|
|
||||||
for snapshot in snapshots:
|
for snapshot in snapshots:
|
||||||
@@ -3135,9 +3134,9 @@ class VMAXCommon(object):
consistencyGroup = cgsnapshot.get('consistencygroup')
model_update = {}
snapshots_model_update = []
-LOG.info(_LI(
+LOG.info(
"Delete snapshot for source CG %(cgId)s "
-"cgsnapshotID: %(cgsnapshot)s."),
+"cgsnapshotID: %(cgsnapshot)s.",
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})

@@ -3278,9 +3277,9 @@ class VMAXCommon(object):
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy.
if extraSpecs[FASTPOLICY]:
-LOG.info(_LI(
+LOG.info(
"Adding volume: %(volumeName)s to default storage group"
-" for FAST policy: %(fastPolicyName)s."),
+" for FAST policy: %(fastPolicyName)s.",
{'volumeName': volumeName,
'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (

@@ -3551,9 +3550,9 @@ class VMAXCommon(object):

storageSystemName = volumeInstance['SystemName']
if not isValid:
-LOG.error(_LE(
+LOG.error(
"Volume %(name)s is not suitable for storage "
-"assisted migration using retype."),
+"assisted migration using retype.",
{'name': volumeName})
return False
if volume['host'] != host['host'] or doChangeCompression:

@@ -3601,9 +3600,9 @@ class VMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if foundStorageGroupInstanceName is None:
-LOG.warning(_LW(
+LOG.warning(
"Volume : %(volumeName)s is not currently "
-"belonging to any storage group."),
+"belonging to any storage group.",
{'volumeName': volumeName})
else:
self.masking.remove_and_reset_members(

@@ -3621,8 +3620,8 @@ class VMAXCommon(object):
poolName, targetSlo, targetWorkload, isCompressionDisabled,
storageSystemName, extraSpecs)
if targetSgInstanceName is None:
-LOG.error(_LE(
-"Failed to get or create storage group %(storageGroupName)s."),
+LOG.error(
+"Failed to get or create storage group %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return False

@@ -3634,9 +3633,9 @@ class VMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, storageGroupName))
if sgFromVolAddedInstanceName is None:
-LOG.error(_LE(
+LOG.error(
"Volume : %(volumeName)s has not been "
-"added to target storage group %(storageGroup)s."),
+"added to target storage group %(storageGroup)s.",
{'volumeName': volumeName,
'storageGroup': targetSgInstanceName})
return False

@@ -3665,9 +3664,9 @@ class VMAXCommon(object):
volumeName, volumeStatus))

if not isValid:
-LOG.error(_LE(
+LOG.error(
"Volume %(name)s is not suitable for storage "
-"assisted migration using retype."),
+"assisted migration using retype.",
{'name': volumeName})
return False
if volume['host'] != host['host']:

@@ -3718,10 +3717,10 @@ class VMAXCommon(object):
self.fast.get_capacities_associated_to_policy(
self.conn, arrayInfo['SerialNumber'],
arrayInfo['FastPolicy']))
-LOG.info(_LI(
+LOG.info(
"FAST: capacity stats for policy %(fastPolicyName)s on array "
"%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, "
-"free_capacity_gb=%(free_capacity_gb)lu."),
+"free_capacity_gb=%(free_capacity_gb)lu.",
{'fastPolicyName': arrayInfo['FastPolicy'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,

@@ -3732,10 +3731,10 @@ class VMAXCommon(object):
self.utils.get_pool_capacities(self.conn,
arrayInfo['PoolName'],
arrayInfo['SerialNumber']))
-LOG.info(_LI(
+LOG.info(
"NON-FAST: capacity stats for pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
-"free_capacity_gb=%(free_capacity_gb)lu."),
+"free_capacity_gb=%(free_capacity_gb)lu.",
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,

@@ -3813,8 +3812,8 @@ class VMAXCommon(object):
sloFromExtraSpec = poolDetails[0]
workloadFromExtraSpec = poolDetails[1]
except KeyError:
-LOG.error(_LE("Error parsing SLO, workload from "
-"the provided extra_specs."))
+LOG.error("Error parsing SLO, workload from "
+"the provided extra_specs.")
else:
# Throw an exception as it is compulsory to have
# pool_name in the extra specs

@@ -3904,10 +3903,10 @@ class VMAXCommon(object):
volumeInstance.path, volumeName, fastPolicyName,
extraSpecs))
if defaultStorageGroupInstanceName is None:
-LOG.warning(_LW(
+LOG.warning(
"The volume: %(volumename)s. was not first part of the "
"default storage group for FAST policy %(fastPolicyName)s"
-"."),
+".",
{'volumename': volumeName,
'fastPolicyName': fastPolicyName})
# Check if it is part of another storage group.

@@ -3946,12 +3945,12 @@ class VMAXCommon(object):
volumeInstance, volumeName, fastPolicyName,
extraSpecs))
if assocDefaultStorageGroupName is None:
-LOG.error(_LE(
+LOG.error(
"Failed to Roll back to re-add volume %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s. Please contact your sysadmin to "
"get the volume returned to the default "
-"storage group."),
+"storage group.",
{'volumeName': volumeName,
'fastPolicyName': fastPolicyName})
@@ -4208,8 +4207,8 @@ class VMAXCommon(object):
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs)
-LOG.info(_LI("Snapshot creation %(cloneName)s completed. "
-"Source Volume: %(sourceName)s."),
+LOG.info("Snapshot creation %(cloneName)s completed. "
+"Source Volume: %(sourceName)s.",
{'cloneName': cloneName,
'sourceName': sourceName})

@@ -4246,8 +4245,8 @@ class VMAXCommon(object):
if mvInstanceName is not None:
targetWwns = self.masking.get_target_wwns(
self.conn, mvInstanceName)
-LOG.info(_LI("Target wwns in masking view %(maskingView)s: "
-"%(targetWwns)s."),
+LOG.info("Target wwns in masking view %(maskingView)s: "
+"%(targetWwns)s.",
{'maskingView': mvInstanceName,
'targetWwns': six.text_type(targetWwns)})
return targetWwns

@@ -4347,9 +4346,9 @@ class VMAXCommon(object):
sourceInstance, extraSpecs, targetInstance, rsdInstance,
copyState))
except Exception:
-LOG.warning(_LW(
+LOG.warning(
"Clone failed on V3. Cleaning up the target volume. "
-"Clone name: %(cloneName)s "),
+"Clone name: %(cloneName)s ",
{'cloneName': cloneName})
if targetInstance:
self._cleanup_target(

@@ -4361,7 +4360,7 @@ class VMAXCommon(object):
self.conn, job['Job'])
targetVolumeInstance = (
self.provisionv3.get_volume_from_job(self.conn, job['Job']))
-LOG.info(_LI("The target instance device id is: %(deviceid)s."),
+LOG.info("The target instance device id is: %(deviceid)s.",
{'deviceid': targetVolumeInstance['DeviceID']})

if not isSnapshot:

@@ -4426,7 +4425,7 @@ class VMAXCommon(object):
replicationService, six.text_type(cgsnapshot['id']))

if cgInstanceName is None:
-LOG.error(_LE("Cannot find CG group %(cgName)s."),
+LOG.error("Cannot find CG group %(cgName)s.",
{'cgName': cgsnapshot['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
return modelUpdate, []

@@ -4579,8 +4578,8 @@ class VMAXCommon(object):

# Manage existing volume is not supported if fast enabled.
if extraSpecs[FASTPOLICY]:
-LOG.warning(_LW(
-"FAST is enabled. Policy: %(fastPolicyName)s."),
+LOG.warning(
+"FAST is enabled. Policy: %(fastPolicyName)s.",
{'fastPolicyName': extraSpecs[FASTPOLICY]})
exceptionMessage = (_(
"Manage volume is not supported if FAST is enable. "

@@ -4743,8 +4742,8 @@ class VMAXCommon(object):
:param remove_volumes: the volumes uuids you want to remove from
the CG
"""
-LOG.info(_LI("Update Consistency Group: %(group)s. "
-"This adds and/or removes volumes from a CG."),
+LOG.info("Update Consistency Group: %(group)s. "
+"This adds and/or removes volumes from a CG.",
{'group': group['id']})

modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}

@@ -4780,7 +4779,7 @@ class VMAXCommon(object):
except exception.ConsistencyGroupNotFound:
raise
except Exception as ex:
-LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
+LOG.error("Exception: %(ex)s", {'ex': ex})
exceptionMessage = (_("Failed to update consistency group:"
" %(cgName)s.")
% {'cgName': group['id']})

@@ -4799,7 +4798,7 @@ class VMAXCommon(object):
for volume in volumes:
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
-LOG.error(_LE("Volume %(name)s not found on the array."),
+LOG.error("Volume %(name)s not found on the array.",
{'name': volume['name']})
else:
volumeInstanceNames.append(volumeInstance.path)

@@ -5136,14 +5135,14 @@ class VMAXCommon(object):
extraSpecsDictList = []
isV3 = False

-if isinstance(group, Group):
+if isinstance(group, group_obj.Group):
for volume_type in group.volume_types:
extraSpecsDict, storageSystems, isV3 = (
self._update_extra_specs_list(
volume_type.extra_specs, len(group.volume_types),
volume_type.id))
extraSpecsDictList.append(extraSpecsDict)
-elif isinstance(group, ConsistencyGroup):
+elif isinstance(group, cg_obj.ConsistencyGroup):
volumeTypeIds = group.volume_type_id.split(",")
volumeTypeIds = list(filter(None, volumeTypeIds))
for volumeTypeId in volumeTypeIds:
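The hunk above also switches the isinstance() checks to module-qualified names (group_obj.Group, cg_obj.ConsistencyGroup). The matching import change is not shown in this hunk; presumably the module now imports the objects modules under those aliases. A minimal sketch under that assumption (the alias names come from the hunk, the import form and the helper name are illustrative only):

    from cinder.objects import consistencygroup as cg_obj  # assumed import form
    from cinder.objects import group as group_obj          # assumed import form

    def volume_type_info_for(group):
        # Dispatch on the object type, mirroring the checks in the hunk above.
        if isinstance(group, group_obj.Group):
            return [vt.extra_specs for vt in group.volume_types]
        elif isinstance(group, cg_obj.ConsistencyGroup):
            return group.volume_type_id.split(",")
        return []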
@@ -5321,7 +5320,7 @@ class VMAXCommon(object):
sourceVolume, sourceInstance, targetInstance, extraSpecs,
self.rep_config)

-LOG.info(_LI('Successfully setup replication for %s.'),
+LOG.info('Successfully setup replication for %s.',
sourceVolume['name'])
replication_status = REPLICATION_ENABLED
replication_driver_data = rdfDict['keybindings']

@@ -5378,19 +5377,19 @@ class VMAXCommon(object):
self._cleanup_remote_target(
conn, repServiceInstanceName, sourceInstance,
targetInstance, extraSpecs, repExtraSpecs)
-LOG.info(_LI('Successfully destroyed replication for '
-'volume: %(volume)s'),
+LOG.info('Successfully destroyed replication for '
+'volume: %(volume)s',
{'volume': volumeName})
else:
-LOG.warning(_LW('Replication target not found for '
-'replication-enabled volume: %(volume)s'),
+LOG.warning('Replication target not found for '
+'replication-enabled volume: %(volume)s',
{'volume': volumeName})
except Exception as e:
-LOG.error(_LE('Cannot get necessary information to cleanup '
+LOG.error('Cannot get necessary information to cleanup '
'replication target for volume: %(volume)s. '
'The exception received was: %(e)s. Manual '
'clean-up may be required. Please contact '
-'your administrator.'),
+'your administrator.',
{'volume': volumeName, 'e': e})

def _cleanup_remote_target(

@@ -5438,9 +5437,9 @@ class VMAXCommon(object):
:param volumeDict: the source volume dictionary
:param extraSpecs: the extra specifications
"""
-LOG.warning(_LW(
+LOG.warning(
"Replication failed. Cleaning up the source volume. "
-"Volume name: %(sourceName)s "),
+"Volume name: %(sourceName)s.",
{'sourceName': volumeName})
sourceInstance = self.utils.find_volume_instance(
conn, volumeDict, volumeName)
@@ -5484,11 +5483,11 @@ class VMAXCommon(object):
repServiceInstanceName = self.utils.find_replication_service(
conn, storageSystem)
RDFGroupName = self.rep_config['rdf_group_label']
-LOG.info(_LI("Replication group: %(RDFGroup)s."),
+LOG.info("Replication group: %(RDFGroup)s.",
{'RDFGroup': RDFGroupName})
rdfGroupInstance = self.provisionv3.get_rdf_group_instance(
conn, repServiceInstanceName, RDFGroupName)
-LOG.info(_LI("Found RDF group instance: %(RDFGroup)s."),
+LOG.info("Found RDF group instance: %(RDFGroup)s.",
{'RDFGroup': rdfGroupInstance})
if rdfGroupInstance is None:
exception_message = (_("Cannot find replication group: "

@@ -5597,11 +5596,10 @@ class VMAXCommon(object):
rep_data = six.text_type(replication_driver_data)

except Exception as ex:
-msg = _LE(
+LOG.error(
'Failed to failover volume %(volume_id)s. '
-'Error: %(error)s.')
-LOG.error(msg, {'volume_id': vol['id'],
-'error': ex}, )
+'Error: %(error)s.',
+{'volume_id': vol['id'], 'error': ex})
new_status = FAILOVER_ERROR

model_update = {'volume_id': vol['id'],
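The hunk above folds a pre-built, translated message into a single logging call. A minimal sketch of the resulting error path, self-contained for illustration (failover_one_volume and the FAILOVER_ERROR value are stand-ins for the surrounding method and module constant, not code from this file):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)
    FAILOVER_ERROR = 'error'  # stand-in for the module-level constant

    def failover_one_volume(vol, do_failover):
        # On failure, log once; the dict is interpolated lazily by the logger.
        try:
            return do_failover(vol)
        except Exception as ex:
            LOG.error('Failed to failover volume %(volume_id)s. '
                      'Error: %(error)s.',
                      {'volume_id': vol['id'], 'error': ex})
            return FAILOVER_ERROR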
@@ -5628,7 +5626,7 @@ class VMAXCommon(object):
recovery = self.recover_volumes_on_failback(volume)
volume_update_list.append(recovery)

-LOG.info(_LI("Failover host complete"))
+LOG.info("Failover host complete")

return secondary_id, volume_update_list

@@ -5733,24 +5731,24 @@ class VMAXCommon(object):
targetVolumeInstance, volumeName, repExtraSpecs,
None, False)

-LOG.info(_LI("Breaking replication relationship..."))
+LOG.info("Breaking replication relationship...")
self.break_rdf_relationship(
self.conn, repServiceInstanceName,
storageSynchronizationSv, extraSpecs)

# extend the source volume

-LOG.info(_LI("Extending source volume..."))
+LOG.info("Extending source volume...")
rc, volumeDict = self._extend_v3_volume(
volumeInstance, volumeName, newSize, extraSpecs)

# extend the target volume
-LOG.info(_LI("Extending target volume..."))
+LOG.info("Extending target volume...")
self._extend_v3_volume(targetVolumeInstance, volumeName,
newSize, repExtraSpecs)

# re-create replication relationship
-LOG.info(_LI("Recreating replication relationship..."))
+LOG.info("Recreating replication relationship...")
self.setup_volume_replication(
self.conn, volume, volumeDict,
extraSpecs, targetVolumeInstance)

@@ -5826,9 +5824,9 @@ class VMAXCommon(object):

except Exception as e:
LOG.warning(
-_LW("Remote replication failed. Cleaning up the target "
+"Remote replication failed. Cleaning up the target "
"volume and returning source volume to default storage "
-"group. Volume name: %(cloneName)s "),
+"group. Volume name: %(cloneName)s ",
{'cloneName': volumeName})

self._cleanup_remote_target(

@@ -5958,10 +5956,10 @@ class VMAXCommon(object):
extraSpecs[WORKLOAD])
except Exception:
LOG.warning(
-_LW("The target array does not support the storage "
+"The target array does not support the storage "
"pool setting for SLO %(slo)s or workload "
"%(workload)s. Not assigning any SLO or "
-"workload."),
+"workload.",
{'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
repExtraSpecs[SLO] = None

@@ -5969,9 +5967,9 @@ class VMAXCommon(object):
repExtraSpecs[WORKLOAD] = None

else:
-LOG.warning(_LW("Cannot determine storage pool settings of "
+LOG.warning("Cannot determine storage pool settings of "
"target array. Not assigning any SLO or "
-"workload"))
+"workload")
repExtraSpecs[SLO] = None
if extraSpecs[WORKLOAD]:
repExtraSpecs[WORKLOAD] = None

@@ -6004,9 +6002,9 @@ class VMAXCommon(object):
arrayInfo['Workload'])
except Exception:
LOG.info(
-_LI("The target array does not support the storage "
+"The target array does not support the storage "
"pool setting for SLO %(slo)s or workload "
-"%(workload)s. SLO stats will not be reported."),
+"%(workload)s. SLO stats will not be reported.",
{'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
secondaryInfo['SLO'] = None

@@ -6016,8 +6014,8 @@ class VMAXCommon(object):
self.multiPoolSupportEnabled = False

else:
-LOG.info(_LI("Cannot determine storage pool settings of "
-"target array. SLO stats will not be reported."))
+LOG.info("Cannot determine storage pool settings of "
+"target array. SLO stats will not be reported.")
secondaryInfo['SLO'] = None
if arrayInfo['Workload']:
secondaryInfo['Workload'] = None
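Throughout the hunks above the transformation is mechanical: the _LE/_LI/_LW wrapper and its extra closing parenthesis are dropped, while the format string and the argument dict stay separate so oslo.log keeps deferring interpolation until the record is actually emitted. A minimal sketch of the resulting call shape, assuming a standard oslo_log setup (the function name is illustrative; the message text is copied from one of the hunks above):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def warn_pre_deletion_check(volume_name, storage_group_names):
        # No translation marker; the dict is passed as a logging argument,
        # so the string is only %-interpolated if the record is emitted.
        LOG.warning(
            "Pre check for deletion. "
            "Volume: %(volumeName)s is part of a storage group. "
            "Attempting removal from %(storageGroupInstanceNames)s.",
            {'volumeName': volume_name,
             'storageGroupInstanceNames': storage_group_names})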
@@ -16,7 +16,7 @@
from oslo_log import log as logging

from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import utils

@@ -50,12 +50,11 @@ class VMAXFast(object):
isTieringPolicySupported = self.is_tiering_policy_enabled(
conn, tierPolicyServiceInstanceName)
if isTieringPolicySupported is None:
-LOG.error(_LE("Cannot determine whether "
-"Tiering Policy is supported on this array."))
+LOG.error("Cannot determine whether "
+"Tiering Policy is supported on this array.")

if isTieringPolicySupported is False:
-LOG.error(_LE("Tiering Policy is not "
-"supported on this array."))
+LOG.error("Tiering Policy is not supported on this array.")
return isTieringPolicySupported

def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName):

@@ -87,8 +86,8 @@ class VMAXFast(object):
break

if foundIsSupportsTieringPolicies is None:
-LOG.error(_LE("Cannot determine if Tiering Policies "
-"are supported."))
+LOG.error("Cannot determine if Tiering Policies "
+"are supported.")

return foundIsSupportsTieringPolicies

@@ -113,8 +112,7 @@ class VMAXFast(object):
conn, controllerConfigService)

if not self._check_if_fast_supported(conn, storageSystemInstanceName):
-LOG.error(_LE(
-"FAST is not supported on this array."))
+LOG.error("FAST is not supported on this array.")
raise

defaultSgName = self.format_default_sg_string(fastPolicyName)

@@ -127,9 +125,9 @@ class VMAXFast(object):
controllerConfigService,
defaultSgName))
if defaultStorageGroupInstanceName is None:
-LOG.error(_LE(
+LOG.error(
"Unable to find default storage group "
-"for FAST policy : %(fastPolicyName)s."),
+"for FAST policy : %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
raise

@@ -137,9 +135,9 @@ class VMAXFast(object):
foundDefaultStorageGroupInstanceName = (
assocStorageGroupInstanceName)
else:
-LOG.warning(_LW(
+LOG.warning(
"Volume: %(volumeName)s Does not belong "
-"to storage group %(defaultSgName)s."),
+"to storage group %(defaultSgName)s.",
{'volumeName': volumeName,
'defaultSgName': defaultSgName})
return foundDefaultStorageGroupInstanceName, defaultSgName

@@ -177,8 +175,8 @@ class VMAXFast(object):
storageGroupInstanceName = self.utils.find_storage_masking_group(
conn, controllerConfigService, defaultSgName)
if storageGroupInstanceName is None:
-LOG.error(_LE(
-"Unable to get default storage group %(defaultSgName)s."),
+LOG.error(
+"Unable to get default storage group %(defaultSgName)s.",
{'defaultSgName': defaultSgName})
return failedRet

@@ -214,9 +212,9 @@ class VMAXFast(object):
firstVolumeInstance = self._create_volume_for_default_volume_group(
conn, controllerConfigService, volumeInstance.path, extraSpecs)
if firstVolumeInstance is None:
-LOG.error(_LE(
+LOG.error(
"Failed to create a first volume for storage "
-"group : %(storageGroupName)s."),
+"group : %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return failedRet

@@ -225,9 +223,9 @@ class VMAXFast(object):
conn, controllerConfigService, storageGroupName,
firstVolumeInstance.path, extraSpecs))
if defaultStorageGroupInstanceName is None:
-LOG.error(_LE(
+LOG.error(
"Failed to create default storage group for "
-"FAST policy : %(fastPolicyName)s."),
+"FAST policy : %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
return failedRet

@@ -240,9 +238,9 @@ class VMAXFast(object):
tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
conn, tierPolicyServiceInstanceName, fastPolicyName)
if tierPolicyRuleInstanceName is None:
-LOG.error(_LE(
+LOG.error(
"Unable to get policy rule for fast policy: "
-"%(fastPolicyName)s."),
+"%(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
return failedRet

@@ -280,7 +278,7 @@ class VMAXFast(object):
poolInstanceName = self.utils.get_assoc_pool_from_volume(
conn, volumeInstanceName)
if poolInstanceName is None:
-LOG.error(_LE("Unable to get associated pool of volume."))
+LOG.error("Unable to get associated pool of volume.")
return failedRet

volumeName = 'vol1'

@@ -408,8 +406,8 @@ class VMAXFast(object):

if len(storageTierInstanceNames) == 0:
storageTierInstanceNames = None
-LOG.warning(_LW(
-"Unable to get storage tiers from tier policy rule."))
+LOG.warning(
+"Unable to get storage tiers from tier policy rule.")

return storageTierInstanceNames

@@ -503,8 +501,8 @@ class VMAXFast(object):
tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
conn, tierPolicyServiceInstanceName, fastPolicyName)
if tierPolicyRuleInstanceName is None:
-LOG.error(_LE(
-"Cannot find the fast policy %(fastPolicyName)s."),
+LOG.error(
+"Cannot find the fast policy %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
return failedRet
else:

@@ -521,9 +519,9 @@ class VMAXFast(object):
storageGroupInstanceName, tierPolicyRuleInstanceName,
storageGroupName, fastPolicyName, extraSpecs)
except Exception:
-LOG.exception(_LE(
+LOG.exception(
"Failed to add storage group %(storageGroupInstanceName)s "
-"to tier policy rule %(tierPolicyRuleInstanceName)s."),
+"to tier policy rule %(tierPolicyRuleInstanceName)s.",
{'storageGroupInstanceName': storageGroupInstanceName,
'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName})
return failedRet

@@ -588,15 +586,15 @@ class VMAXFast(object):
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
-LOG.error(_LE("Error disassociating storage group from "
-"policy: %s."), errordesc)
+LOG.error("Error disassociating storage group from "
+"policy: %s.", errordesc)
else:
LOG.debug("Disassociated storage group from policy.")
else:
LOG.debug("ModifyStorageTierPolicyRule completed.")
except Exception as e:
-LOG.info(_LI("Storage group not associated with the "
-"policy. Exception is %s."), e)
+LOG.info("Storage group not associated with the "
+"policy. Exception is %s.", e)

def get_pool_associated_to_policy(
self, conn, fastPolicyName, arraySN,

@@ -664,7 +662,7 @@ class VMAXFast(object):
isTieringPolicySupported = self.is_tiering_policy_enabled(
conn, tierPolicyServiceInstanceName)
except Exception as e:
-LOG.error(_LE("Exception: %s."), e)
+LOG.error("Exception: %s.", e)
return False

return isTieringPolicySupported
@@ -18,7 +18,6 @@ import ast
from oslo_log import log as logging
import six

-from cinder.i18n import _LW
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.vmax import common

@@ -274,7 +273,7 @@ class VMAXFCDriver(driver.FibreChannelDriver):
'target_wwns': target_wwns,
'init_targ_map': init_targ_map}
else:
-LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
+LOG.warning("Volume %(volume)s is not in any masking view.",
{'volume': volume['name']})
return zoning_mappings

@@ -30,7 +30,7 @@ import six
from six.moves import http_client
from six.moves import urllib

-from cinder.i18n import _, _LI
+from cinder.i18n import _

# Handle case where we are running in a monkey patched environment
if OpenSSL and patcher.is_monkey_patched('socket'):

@@ -94,9 +94,9 @@ class HTTPSConnection(http_client.HTTPSConnection):
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, ca_certs=None, no_verification=False):
if not pywbemAvailable:
-LOG.info(_LI(
+LOG.info(
'Module PyWBEM not installed. '
-'Install PyWBEM using the python-pywbem package.'))
+'Install PyWBEM using the python-pywbem package.')
if six.PY3:
excp_lst = (TypeError, ssl.SSLError)
else:

@@ -20,7 +20,7 @@ from oslo_log import log as logging
import six

from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.vmax import common

@@ -209,7 +209,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
iscsi_properties = self.smis_get_iscsi_properties(
volume, connector, ip_and_iqn, is_multipath)

-LOG.info(_LI("iSCSI properties are: %s"), iscsi_properties)
+LOG.info("iSCSI properties are: %s", iscsi_properties)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties

@@ -246,7 +246,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
isError = True

if isError:
-LOG.error(_LE("Unable to get the lun id"))
+LOG.error("Unable to get the lun id")
exception_message = (_("Cannot find device number for volume "
"%(volumeName)s.")
% {'volumeName': volume['name']})

@@ -265,15 +265,14 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']

-LOG.info(_LI(
-"ISCSI properties: %(properties)s."), {'properties': properties})
-LOG.info(_LI(
-"ISCSI volume is: %(volume)s."), {'volume': volume})
+LOG.info(
+"ISCSI properties: %(properties)s.", {'properties': properties})
+LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume})

if 'provider_auth' in volume:
auth = volume['provider_auth']
-LOG.info(_LI(
-"AUTH properties: %(authProps)s."), {'authProps': auth})
+LOG.info(
+"AUTH properties: %(authProps)s.", {'authProps': auth})

if auth is not None:
(auth_method, auth_username, auth_secret) = auth.split()

@@ -282,7 +281,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret

-LOG.info(_LI("AUTH properties: %s."), properties)
+LOG.info("AUTH properties: %s.", properties)

return properties
@@ -18,7 +18,7 @@ import six

from cinder import coordination
from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import fast
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import provision_v3

@@ -125,10 +125,10 @@ class VMAXMasking(object):
{'maskingViewInstanceName': maskingViewInstanceName,
'storageGroupInstanceName': storageGroupInstanceName})
except Exception as e:
-LOG.exception(_LE(
+LOG.exception(
"Masking View creation or retrieval was not successful "
"for masking view %(maskingViewName)s. "
-"Attempting rollback."),
+"Attempting rollback.",
{'maskingViewName': maskingViewDict['maskingViewName']})
errorMessage = e

@@ -225,9 +225,9 @@ class VMAXMasking(object):
volumeName, maskingviewdict,
defaultStorageGroupInstanceName)
else:
-LOG.warning(_LW(
+LOG.warning(
"Volume: %(volumeName)s does not belong "
-"to storage group %(defaultSgGroupName)s."),
+"to storage group %(defaultSgGroupName)s.",
{'volumeName': volumeName,
'defaultSgGroupName': defaultSgGroupName})
return defaultStorageGroupInstanceName

@@ -283,8 +283,7 @@ class VMAXMasking(object):
storageSystemName = maskingViewDict['storageSystemName']
maskingViewName = maskingViewDict['maskingViewName']
pgGroupName = maskingViewDict['pgGroupName']
-LOG.info(_LI("Returning random Port Group: "
-"%(portGroupName)s."),
+LOG.info("Returning random Port Group: %(portGroupName)s.",
{'portGroupName': pgGroupName})

storageGroupInstanceName, errorMessage = (

@@ -376,7 +375,7 @@ class VMAXMasking(object):
self._get_storage_group_instance_name(
conn, maskingViewDict, storageGroupInstanceName))
if storageGroupInstanceName is None:
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Cannot get or create a storage group: %(sgGroupName)s"
" for volume %(volumeName)s ") %
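The comment rewritten in the hunk above (and in the similar hunks that follow) preserves the rule this commit relies on: strings that can surface to the API user through an exception keep the _() translation wrapper, while log-only strings lose their markers. A short sketch of that split (the function name, the message text, and the choice of VolumeBackendAPIException are illustrative, not taken from this file):

    from oslo_log import log as logging

    from cinder import exception
    from cinder.i18n import _

    LOG = logging.getLogger(__name__)

    def require_storage_group(storage_group, sg_name, volume_name):
        if storage_group is None:
            # Translated, because the text may be raised to the caller.
            msg = (_("Cannot get or create a storage group: %(sgGroupName)s"
                     " for volume %(volumeName)s ") %
                   {'sgGroupName': sg_name, 'volumeName': volume_name})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        # Log-only text stays untranslated.
        LOG.debug("Found storage group %(sgGroupName)s.",
                  {'sgGroupName': sg_name})
        return storage_group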
@@ -404,7 +403,7 @@ class VMAXMasking(object):
conn, maskingViewInstanceName))

if sgFromMvInstanceName is None:
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Cannot get storage group: %(sgGroupName)s "
"from masking view %(maskingViewInstanceName)s. ") %

@@ -427,7 +426,7 @@ class VMAXMasking(object):
portGroupInstanceName = self._get_port_group_instance_name(
conn, controllerConfigService, pgGroupName)
if portGroupInstanceName is None:
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Cannot get port group: %(pgGroupName)s. ") %
{'pgGroupName': pgGroupName})

@@ -455,7 +454,7 @@ class VMAXMasking(object):
conn, controllerConfigService, igGroupName, connector,
storageSystemName, extraSpecs))
if initiatorGroupInstanceName is None:
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Cannot get or create initiator group: "
"%(igGroupName)s. ") %

@@ -486,7 +485,7 @@ class VMAXMasking(object):
conn, controllerConfigService, maskingViewName,
connector, storageSystemName, igGroupName,
extraSpecs):
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Unable to verify initiator group: %(igGroupName)s "
"in masking view %(maskingViewName)s. ") %

@@ -518,7 +517,7 @@ class VMAXMasking(object):
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName, extraSpecs))
if maskingViewInstanceName is None:
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Cannot create masking view: %(maskingViewName)s. ") %
{'maskingViewName': maskingViewName})

@@ -543,9 +542,9 @@ class VMAXMasking(object):
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance, sgGroupName):
-LOG.warning(_LW(
+LOG.warning(
"Volume: %(volumeName)s is already part "
-"of storage group %(sgGroupName)s."),
+"of storage group %(sgGroupName)s.",
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
else:

@@ -576,7 +575,7 @@ class VMAXMasking(object):
volumeInstance, volumeName, sgGroupName, extraSpecs)
if not self._is_volume_in_storage_group(
conn, storageGroupInstanceName, volumeInstance, sgGroupName):
-# This may be used in exception hence _ instead of _LE.
+# This may be used in exception hence the use of _.
msg = (_(
"Volume: %(volumeName)s was not added "
"to storage group %(sgGroupName)s.") %

@@ -584,8 +583,7 @@ class VMAXMasking(object):
'sgGroupName': sgGroupName})
LOG.error(msg)
else:
-LOG.info(_LI("Successfully added %(volumeName)s to "
-"%(sgGroupName)s."),
+LOG.info("Successfully added %(volumeName)s to %(sgGroupName)s.",
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
return msg

@@ -742,9 +740,9 @@ class VMAXMasking(object):
conn, foundMaskingViewInstanceName)
if instance is None:
foundMaskingViewInstanceName = None
-LOG.error(_LE(
+LOG.error(
"Looks like masking view: %(maskingViewName)s "
-"has recently been deleted."),
+"has recently been deleted.",
{'maskingViewName': maskingViewName})
else:
LOG.debug(

@@ -800,21 +798,21 @@ class VMAXMasking(object):
storageGroupName, fastPolicyName,
maskingViewDict['extraSpecs']))
if assocTierPolicyInstanceName is None:
-LOG.error(_LE(
+LOG.error(
"Cannot add and verify tier policy association for "
"storage group : %(storageGroupName)s to "
-"FAST policy : %(fastPolicyName)s."),
+"FAST policy : %(fastPolicyName)s.",
{'storageGroupName': storageGroupName,
'fastPolicyName': fastPolicyName})
return failedRet
if foundStorageGroupInstanceName is None:
-LOG.error(_LE(
-"Cannot get storage Group from job : %(storageGroupName)s."),
+LOG.error(
+"Cannot get storage Group from job : %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return failedRet
else:
-LOG.info(_LI(
-"Created new storage group: %(storageGroupName)s."),
+LOG.info(
+"Created new storage group: %(storageGroupName)s.",
{'storageGroupName': storageGroupName})

return foundStorageGroupInstanceName

@@ -843,9 +841,9 @@ class VMAXMasking(object):
break

if foundPortGroupInstanceName is None:
-LOG.error(_LE(
+LOG.error(
"Could not find port group : %(portGroupName)s. Check that "
-"the EMC configuration file has the correct port group name."),
+"the EMC configuration file has the correct port group name.",
{'portGroupName': portGroupName})

return foundPortGroupInstanceName

@@ -886,9 +884,9 @@ class VMAXMasking(object):
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
-LOG.info(_LI(
+LOG.info(
"Initiator Name(s) %(initiatorNames)s are not on array "
-"%(storageSystemName)s."),
+"%(storageSystemName)s.",
{'initiatorNames': initiatorNames,
'storageSystemName': storageSystemName})
storageHardwareIDInstanceNames = (

@@ -905,15 +903,13 @@ class VMAXMasking(object):
conn, controllerConfigService, igGroupName,
storageHardwareIDInstanceNames, extraSpecs)

-LOG.info(_LI(
-"Created new initiator group name: %(igGroupName)s."),
+LOG.info("Created new initiator group name: %(igGroupName)s.",
{'igGroupName': igGroupName})
else:
initiatorGroupInstance = conn.GetInstance(
foundInitiatorGroupInstanceName, LocalOnly=False)
-LOG.info(_LI(
-"Using existing initiator group name: %(igGroupName)s."),
+LOG.info("Using existing initiator group name: %(igGroupName)s.",
{'igGroupName': initiatorGroupInstance['ElementName']})
|
||||||
{'igGroupName': initiatorGroupInstance['ElementName']})
|
|
||||||
|
|
||||||
return foundInitiatorGroupInstanceName
|
return foundInitiatorGroupInstanceName
|
||||||
|
|
||||||
@ -1100,9 +1096,8 @@ class VMAXMasking(object):
|
|||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exceptionMessage)
|
data=exceptionMessage)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info("Created new masking view : %(maskingViewName)s.",
|
||||||
"Created new masking view : %(maskingViewName)s."),
|
{'maskingViewName': maskingViewName})
|
||||||
{'maskingViewName': maskingViewName})
|
|
||||||
return rc, job
|
return rc, job
|
||||||
|
|
||||||
def find_new_masking_view(self, conn, jobDict):
|
def find_new_masking_view(self, conn, jobDict):
|
||||||
@ -1148,7 +1143,7 @@ class VMAXMasking(object):
|
|||||||
{'view': maskingViewName,
|
{'view': maskingViewName,
|
||||||
'masking': foundStorageGroupInstanceName})
|
'masking': foundStorageGroupInstanceName})
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
|
LOG.warning("Unable to find Masking view: %(view)s.",
|
||||||
{'view': maskingViewName})
|
{'view': maskingViewName})
|
||||||
|
|
||||||
return foundStorageGroupInstanceName
|
return foundStorageGroupInstanceName
|
||||||
@ -1221,14 +1216,14 @@ class VMAXMasking(object):
|
|||||||
foundPortGroupInstanceName = self.find_port_group(
|
foundPortGroupInstanceName = self.find_port_group(
|
||||||
conn, controllerConfigService, pgGroupName)
|
conn, controllerConfigService, pgGroupName)
|
||||||
if foundPortGroupInstanceName is None:
|
if foundPortGroupInstanceName is None:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Cannot find a portGroup with name %(pgGroupName)s. "
|
"Cannot find a portGroup with name %(pgGroupName)s. "
|
||||||
"The port group for a masking view must be pre-defined."),
|
"The port group for a masking view must be pre-defined.",
|
||||||
{'pgGroupName': pgGroupName})
|
{'pgGroupName': pgGroupName})
|
||||||
return foundPortGroupInstanceName
|
return foundPortGroupInstanceName
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"Port group instance name is %(foundPortGroupInstanceName)s."),
|
"Port group instance name is %(foundPortGroupInstanceName)s.",
|
||||||
{'foundPortGroupInstanceName': foundPortGroupInstanceName})
|
{'foundPortGroupInstanceName': foundPortGroupInstanceName})
|
||||||
|
|
||||||
return foundPortGroupInstanceName
|
return foundPortGroupInstanceName
|
||||||
@ -1250,10 +1245,9 @@ class VMAXMasking(object):
|
|||||||
conn, controllerConfigService, igGroupName, connector,
|
conn, controllerConfigService, igGroupName, connector,
|
||||||
storageSystemName, extraSpecs))
|
storageSystemName, extraSpecs))
|
||||||
if foundInitiatorGroupInstanceName is None:
|
if foundInitiatorGroupInstanceName is None:
|
||||||
LOG.error(_LE(
|
LOG.error("Cannot create or find an initiator group with "
|
||||||
"Cannot create or find an initiator group with "
|
"name %(igGroupName)s.",
|
||||||
"name %(igGroupName)s."),
|
{'igGroupName': igGroupName})
|
||||||
{'igGroupName': igGroupName})
|
|
||||||
return foundInitiatorGroupInstanceName
|
return foundInitiatorGroupInstanceName
|
||||||
|
|
||||||
def _get_masking_view_instance_name(
|
def _get_masking_view_instance_name(
|
||||||
@ -1278,9 +1272,9 @@ class VMAXMasking(object):
|
|||||||
initiatorGroupInstanceName, extraSpecs))
|
initiatorGroupInstanceName, extraSpecs))
|
||||||
foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
|
foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
|
||||||
if foundMaskingViewInstanceName is None:
|
if foundMaskingViewInstanceName is None:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Cannot find the new masking view just created with name "
|
"Cannot find the new masking view just created with name "
|
||||||
"%(maskingViewName)s."),
|
"%(maskingViewName)s.",
|
||||||
{'maskingViewName': maskingViewName})
|
{'maskingViewName': maskingViewName})
|
||||||
|
|
||||||
return foundMaskingViewInstanceName
|
return foundMaskingViewInstanceName
|
||||||
@ -1324,11 +1318,11 @@ class VMAXMasking(object):
|
|||||||
LOG.error(errorMessage)
|
LOG.error(errorMessage)
|
||||||
message = (_("V3 rollback"))
|
message = (_("V3 rollback"))
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW(
|
LOG.warning(
|
||||||
"No storage group found. "
|
"No storage group found. "
|
||||||
"Performing rollback on Volume: %(volumeName)s "
|
"Performing rollback on Volume: %(volumeName)s "
|
||||||
"To return it to the default storage group for FAST "
|
"To return it to the default storage group for FAST "
|
||||||
"policy %(fastPolicyName)s."),
|
"policy %(fastPolicyName)s.",
|
||||||
{'volumeName': rollbackDict['volumeName'],
|
{'volumeName': rollbackDict['volumeName'],
|
||||||
'fastPolicyName': rollbackDict['fastPolicyName']})
|
'fastPolicyName': rollbackDict['fastPolicyName']})
|
||||||
assocDefaultStorageGroupName = (
|
assocDefaultStorageGroupName = (
|
||||||
@ -1341,22 +1335,21 @@ class VMAXMasking(object):
|
|||||||
rollbackDict['fastPolicyName'],
|
rollbackDict['fastPolicyName'],
|
||||||
rollbackDict['extraSpecs']))
|
rollbackDict['extraSpecs']))
|
||||||
if assocDefaultStorageGroupName is None:
|
if assocDefaultStorageGroupName is None:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Failed to Roll back to re-add volume "
|
"Failed to Roll back to re-add volume "
|
||||||
"%(volumeName)s "
|
"%(volumeName)s "
|
||||||
"to default storage group for fast policy "
|
"to default storage group for fast policy "
|
||||||
"%(fastPolicyName)s: Please contact your sys "
|
"%(fastPolicyName)s: Please contact your sys "
|
||||||
"admin to get the volume re-added manually."),
|
"admin to get the volume re-added manually.",
|
||||||
{'volumeName': rollbackDict['volumeName'],
|
{'volumeName': rollbackDict['volumeName'],
|
||||||
'fastPolicyName': rollbackDict['fastPolicyName']})
|
'fastPolicyName': rollbackDict['fastPolicyName']})
|
||||||
message = (_("V2 rollback, volume is not in any storage "
|
message = (_("V2 rollback, volume is not in any storage "
|
||||||
"group."))
|
"group."))
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI(
|
LOG.info("The storage group found is "
|
||||||
"The storage group found is "
|
"%(foundStorageGroupInstanceName)s.",
|
||||||
"%(foundStorageGroupInstanceName)s."),
|
{'foundStorageGroupInstanceName':
|
||||||
{'foundStorageGroupInstanceName':
|
foundStorageGroupInstanceName})
|
||||||
foundStorageGroupInstanceName})
|
|
||||||
|
|
||||||
# Check the name, see if it is the default storage group
|
# Check the name, see if it is the default storage group
|
||||||
# or another.
|
# or another.
|
||||||
@ -1422,7 +1415,7 @@ class VMAXMasking(object):
|
|||||||
{'view': maskingViewName,
|
{'view': maskingViewName,
|
||||||
'masking': foundInitiatorMaskingGroupInstanceName})
|
'masking': foundInitiatorMaskingGroupInstanceName})
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
|
LOG.warning("Unable to find Masking view: %(view)s.",
|
||||||
{'view': maskingViewName})
|
{'view': maskingViewName})
|
||||||
|
|
||||||
return foundInitiatorMaskingGroupInstanceName
|
return foundInitiatorMaskingGroupInstanceName
|
||||||
@ -1471,18 +1464,18 @@ class VMAXMasking(object):
|
|||||||
self._get_storage_hardware_id_instance_names(
|
self._get_storage_hardware_id_instance_names(
|
||||||
conn, initiatorNames, storageSystemName))
|
conn, initiatorNames, storageSystemName))
|
||||||
if not storageHardwareIDInstanceNames:
|
if not storageHardwareIDInstanceNames:
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"Initiator Name(s) %(initiatorNames)s are not on "
|
"Initiator Name(s) %(initiatorNames)s are not on "
|
||||||
"array %(storageSystemName)s. "),
|
"array %(storageSystemName)s.",
|
||||||
{'initiatorNames': initiatorNames,
|
{'initiatorNames': initiatorNames,
|
||||||
'storageSystemName': storageSystemName})
|
'storageSystemName': storageSystemName})
|
||||||
storageHardwareIDInstanceNames = (
|
storageHardwareIDInstanceNames = (
|
||||||
self._create_hardware_ids(conn, initiatorNames,
|
self._create_hardware_ids(conn, initiatorNames,
|
||||||
storageSystemName))
|
storageSystemName))
|
||||||
if not storageHardwareIDInstanceNames:
|
if not storageHardwareIDInstanceNames:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Failed to create hardware id(s) on "
|
"Failed to create hardware id(s) on "
|
||||||
"%(storageSystemName)s."),
|
"%(storageSystemName)s.",
|
||||||
{'storageSystemName': storageSystemName})
|
{'storageSystemName': storageSystemName})
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -1532,11 +1525,11 @@ class VMAXMasking(object):
|
|||||||
"%(maskingViewName)s.",
|
"%(maskingViewName)s.",
|
||||||
{'maskingViewName': maskingViewName})
|
{'maskingViewName': maskingViewName})
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"One of the components of the original masking view "
|
"One of the components of the original masking view "
|
||||||
"%(maskingViewName)s cannot be retrieved so "
|
"%(maskingViewName)s cannot be retrieved so "
|
||||||
"please contact your system administrator to check "
|
"please contact your system administrator to check "
|
||||||
"that the correct initiator(s) are part of masking."),
|
"that the correct initiator(s) are part of masking.",
|
||||||
{'maskingViewName': maskingViewName})
|
{'maskingViewName': maskingViewName})
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
@ -1708,9 +1701,9 @@ class VMAXMasking(object):
|
|||||||
conn, controllerConfigService, storageGroupInstanceName,
|
conn, controllerConfigService, storageGroupInstanceName,
|
||||||
volumeInstance.path, volumeName, extraSpecs)
|
volumeInstance.path, volumeName, extraSpecs)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"Added volume: %(volumeName)s to existing storage group "
|
"Added volume: %(volumeName)s to existing storage group "
|
||||||
"%(sgGroupName)s."),
|
"%(sgGroupName)s.",
|
||||||
{'volumeName': volumeName,
|
{'volumeName': volumeName,
|
||||||
'sgGroupName': sgGroupName})
|
'sgGroupName': sgGroupName})
|
||||||
|
|
||||||
@ -1737,9 +1730,9 @@ class VMAXMasking(object):
|
|||||||
volumeName, fastPolicyName))
|
volumeName, fastPolicyName))
|
||||||
|
|
||||||
if defaultStorageGroupInstanceName is None:
|
if defaultStorageGroupInstanceName is None:
|
||||||
LOG.warning(_LW(
|
LOG.warning(
|
||||||
"Volume %(volumeName)s was not first part of the default "
|
"Volume %(volumeName)s was not first part of the default "
|
||||||
"storage group for the FAST Policy."),
|
"storage group for the FAST Policy.",
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
return failedRet
|
return failedRet
|
||||||
|
|
||||||
@ -1775,9 +1768,9 @@ class VMAXMasking(object):
|
|||||||
defaultSgName))
|
defaultSgName))
|
||||||
|
|
||||||
if emptyStorageGroupInstanceName is not None:
|
if emptyStorageGroupInstanceName is not None:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Failed to remove %(volumeName)s from the default storage "
|
"Failed to remove %(volumeName)s from the default storage "
|
||||||
"group for the FAST Policy."),
|
"group for the FAST Policy.",
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
return failedRet
|
return failedRet
|
||||||
|
|
||||||
@ -1833,7 +1826,7 @@ class VMAXMasking(object):
|
|||||||
if len(maskingGroupInstanceNames) > 0:
|
if len(maskingGroupInstanceNames) > 0:
|
||||||
return maskingGroupInstanceNames
|
return maskingGroupInstanceNames
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("Volume %(volumeName)s not in any storage group."),
|
LOG.info("Volume %(volumeName)s not in any storage group.",
|
||||||
{'volumeName': volumeInstanceName})
|
{'volumeName': volumeInstanceName})
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@ -1870,7 +1863,7 @@ class VMAXMasking(object):
|
|||||||
storageGroupInstanceName,
|
storageGroupInstanceName,
|
||||||
volumeInstance, extraSpecs)
|
volumeInstance, extraSpecs)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Cannot get storage from connector."))
|
LOG.warning("Cannot get storage from connector.")
|
||||||
|
|
||||||
if reset:
|
if reset:
|
||||||
self._return_back_to_default_sg(
|
self._return_back_to_default_sg(
|
||||||
@ -1895,8 +1888,8 @@ class VMAXMasking(object):
|
|||||||
if storageGroupInstanceNames:
|
if storageGroupInstanceNames:
|
||||||
sgNum = len(storageGroupInstanceNames)
|
sgNum = len(storageGroupInstanceNames)
|
||||||
if len(storageGroupInstanceNames) > 1:
|
if len(storageGroupInstanceNames) > 1:
|
||||||
LOG.warning(_LW("Volume %(volumeName)s is belong to "
|
LOG.warning("Volume %(volumeName)s is belong to %(sgNum)s "
|
||||||
"%(sgNum)s storage groups."),
|
"storage groups.",
|
||||||
{'volumeName': volumeInstance['ElementName'],
|
{'volumeName': volumeInstance['ElementName'],
|
||||||
'sgNum': sgNum})
|
'sgNum': sgNum})
|
||||||
for storageGroupInstanceName in storageGroupInstanceNames:
|
for storageGroupInstanceName in storageGroupInstanceNames:
|
||||||
@ -2237,8 +2230,8 @@ class VMAXMasking(object):
|
|||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exceptionMessage)
|
data=exceptionMessage)
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"Masking view %(maskingViewName)s successfully deleted."),
|
"Masking view %(maskingViewName)s successfully deleted.",
|
||||||
{'maskingViewName': maskingViewName})
|
{'maskingViewName': maskingViewName})
|
||||||
|
|
||||||
def _get_and_remove_rule_association(
|
def _get_and_remove_rule_association(
|
||||||
@ -2355,8 +2348,8 @@ class VMAXMasking(object):
|
|||||||
ResultClass='Symm_FCSCSIProtocolEndpoint')
|
ResultClass='Symm_FCSCSIProtocolEndpoint')
|
||||||
numberOfPorts = len(targetPortInstanceNames)
|
numberOfPorts = len(targetPortInstanceNames)
|
||||||
if numberOfPorts <= 0:
|
if numberOfPorts <= 0:
|
||||||
LOG.warning(_LW("No target ports found in "
|
LOG.warning("No target ports found in "
|
||||||
"masking view %(maskingView)s."),
|
"masking view %(maskingView)s.",
|
||||||
{'numPorts': len(targetPortInstanceNames),
|
{'numPorts': len(targetPortInstanceNames),
|
||||||
'maskingView': mvInstanceName})
|
'maskingView': mvInstanceName})
|
||||||
for targetPortInstanceName in targetPortInstanceNames:
|
for targetPortInstanceName in targetPortInstanceNames:
|
||||||
@ -2425,7 +2418,7 @@ class VMAXMasking(object):
|
|||||||
'mv': maskingViewInstanceName})
|
'mv': maskingViewInstanceName})
|
||||||
return portGroupInstanceNames[0]
|
return portGroupInstanceNames[0]
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("No port group found in masking view %(mv)s."),
|
LOG.warning("No port group found in masking view %(mv)s.",
|
||||||
{'mv': maskingViewInstanceName})
|
{'mv': maskingViewInstanceName})
|
||||||
|
|
||||||
def get_initiator_group_from_masking_view(
|
def get_initiator_group_from_masking_view(
|
||||||
@ -2444,8 +2437,8 @@ class VMAXMasking(object):
|
|||||||
'mv': maskingViewInstanceName})
|
'mv': maskingViewInstanceName})
|
||||||
return initiatorGroupInstanceNames[0]
|
return initiatorGroupInstanceNames[0]
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("No Initiator group found in masking view "
|
LOG.warning("No Initiator group found in masking view "
|
||||||
"%(mv)s."), {'mv': maskingViewInstanceName})
|
"%(mv)s.", {'mv': maskingViewInstanceName})
|
||||||
|
|
||||||
def _get_sg_or_mv_associated_with_initiator(
|
def _get_sg_or_mv_associated_with_initiator(
|
||||||
self, conn, controllerConfigService, volumeInstanceName,
|
self, conn, controllerConfigService, volumeInstanceName,
|
||||||
@ -2656,8 +2649,8 @@ class VMAXMasking(object):
|
|||||||
LOG.debug("Deletion of initiator path %(hardwareIdPath)s "
|
LOG.debug("Deletion of initiator path %(hardwareIdPath)s "
|
||||||
"is successful.", {'hardwareIdPath': hardwareIdPath})
|
"is successful.", {'hardwareIdPath': hardwareIdPath})
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Deletion of initiator path %(hardwareIdPath)s "
|
LOG.warning("Deletion of initiator path %(hardwareIdPath)s "
|
||||||
"is failed."), {'hardwareIdPath': hardwareIdPath})
|
"is failed.", {'hardwareIdPath': hardwareIdPath})
|
||||||
|
|
||||||
def _delete_initiators_from_initiator_group(self, conn,
|
def _delete_initiators_from_initiator_group(self, conn,
|
||||||
controllerConfigService,
|
controllerConfigService,
|
||||||
@ -2740,16 +2733,16 @@ class VMAXMasking(object):
|
|||||||
initiatorGroupInstanceName,
|
initiatorGroupInstanceName,
|
||||||
initiatorGroupName, extraSpecs)
|
initiatorGroupName, extraSpecs)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Initiator group %(initiatorGroupName)s is "
|
LOG.warning("Initiator group %(initiatorGroupName)s is "
|
||||||
"associated with masking views and can't be "
|
"associated with masking views and can't be "
|
||||||
"deleted. Number of associated masking view "
|
"deleted. Number of associated masking view "
|
||||||
"is: %(nmv)d."),
|
"is: %(nmv)d.",
|
||||||
{'initiatorGroupName': initiatorGroupName,
|
{'initiatorGroupName': initiatorGroupName,
|
||||||
'nmv': len(maskingViewInstanceNames)})
|
'nmv': len(maskingViewInstanceNames)})
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Initiator group %(initiatorGroupName)s was "
|
LOG.warning("Initiator group %(initiatorGroupName)s was "
|
||||||
"not created by the VMAX driver so will "
|
"not created by the VMAX driver so will "
|
||||||
"not be deleted by the VMAX driver."),
|
"not be deleted by the VMAX driver.",
|
||||||
{'initiatorGroupName': initiatorGroupName})
|
{'initiatorGroupName': initiatorGroupName})
|
||||||
|
|
||||||
def _create_hardware_ids(
|
def _create_hardware_ids(
|
||||||
@ -2793,9 +2786,9 @@ class VMAXMasking(object):
|
|||||||
self._get_port_group_from_masking_view(
|
self._get_port_group_from_masking_view(
|
||||||
conn, maskingViewName, storageSystemName))
|
conn, maskingViewName, storageSystemName))
|
||||||
if portGroupInstanceName is None:
|
if portGroupInstanceName is None:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Cannot get port group from masking view: "
|
"Cannot get port group from masking view: "
|
||||||
"%(maskingViewName)s. "),
|
"%(maskingViewName)s.",
|
||||||
{'maskingViewName': maskingViewName})
|
{'maskingViewName': maskingViewName})
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
@ -2804,8 +2797,8 @@ class VMAXMasking(object):
|
|||||||
portGroupName = (
|
portGroupName = (
|
||||||
portGroupInstance['ElementName'])
|
portGroupInstance['ElementName'])
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.error(_LE(
|
LOG.error(
|
||||||
"Cannot get port group name."))
|
"Cannot get port group name.")
|
||||||
return portGroupName, errorMessage
|
return portGroupName, errorMessage
|
||||||
|
|
||||||
@coordination.synchronized('emc-sg-'
|
@coordination.synchronized('emc-sg-'
|
||||||
|
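Editor's note (illustrative, not part of the commit): the pattern the change converges on is a plain, untranslated format string passed to the logger together with a dict of substitution values, so interpolation is deferred to the logging framework and skipped when the level is disabled. A minimal stdlib-only sketch with hypothetical names:

    import logging

    LOG = logging.getLogger(__name__)

    def add_volume_to_group(volume_name, sg_group_name):
        # Plain string, no _LI()/_LW()/_LE() wrapper; the dict supplies the
        # %(...)s values and is only interpolated if INFO is enabled.
        LOG.info("Successfully added %(volumeName)s to %(sgGroupName)s.",
                 {'volumeName': volume_name, 'sgGroupName': sg_group_name})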
@ -20,7 +20,7 @@ import six

from cinder import coordination
from cinder import exception
- from cinder.i18n import _, _LE, _LW
+ from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils

LOG = logging.getLogger(__name__)
@ -515,9 +515,9 @@ class VMAXProvisionV3(object):
rc, errordesc = self.utils.wait_for_job_complete(
conn, job, extraSpecs)
if rc != 0:
-     LOG.error(_LE(
+     LOG.error(
"Error Create Group: %(groupName)s. "
-         "Return code: %(rc)lu. Error: %(error)s."),
+         "Return code: %(rc)lu. Error: %(error)s.",
{'groupName': groupName,
'rc': rc,
'error': errordesc})
@ -863,11 +863,11 @@ class VMAXProvisionV3(object):
remainingCapacityGb = remainingSLOCapacityGb
wlpEnabled = True
else:
-     LOG.warning(_LW(
+     LOG.warning(
"Remaining capacity %(remainingCapacityGb)s "
"GBs is determined from SRP pool capacity "
"and not the SLO capacity. Performance may "
-         "not be what you expect."),
+         "not be what you expect.",
{'remainingCapacityGb': remainingCapacityGb})

return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb,
@ -30,7 +30,7 @@ import six

from cinder import context
from cinder import exception
- from cinder.i18n import _, _LE, _LI, _LW
+ from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import volume_types

@ -85,9 +85,9 @@ class VMAXUtils(object):

def __init__(self, prtcl):
if not pywbemAvailable:
-     LOG.info(_LI(
+     LOG.info(
"Module PyWBEM not installed. "
-         "Install PyWBEM using the python-pywbem package."))
+         "Install PyWBEM using the python-pywbem package.")
self.protocol = prtcl

def find_storage_configuration_service(self, conn, storageSystemName):
@ -319,9 +319,8 @@ class VMAXUtils(object):
if retries > maxJobRetries:
kwargs['rc'], kwargs['errordesc'] = (
self._verify_job_state(conn, job))
-     LOG.error(_LE("_wait_for_job_complete "
-         "failed after %(retries)d "
-         "tries."),
+     LOG.error("_wait_for_job_complete failed after %(retries)d "
+         "tries.",
{'retries': retries})

raise loopingcall.LoopingCallDone()
@ -457,8 +456,7 @@ class VMAXUtils(object):
raise exception.VolumeBackendAPIException(exceptionMessage)

if kwargs['retries'] > maxJobRetries:
-     LOG.error(_LE("_wait_for_sync failed after %(retries)d "
-         "tries."),
+     LOG.error("_wait_for_sync failed after %(retries)d tries.",
{'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=maxJobRetries)
if kwargs['wait_for_sync_called']:
@ -526,7 +524,7 @@ class VMAXUtils(object):
if len(groups) > 0:
foundStorageSystemInstanceName = groups[0]
else:
-     LOG.error(_LE("Cannot get storage system."))
+     LOG.error("Cannot get storage system.")
raise

return foundStorageSystemInstanceName
@ -549,9 +547,9 @@ class VMAXUtils(object):
ResultClass='CIM_DeviceMaskingGroup')

if len(storageGroupInstanceNames) > 1:
-     LOG.info(_LI(
+     LOG.info(
"The volume belongs to more than one storage group. "
-         "Returning storage group %(sgName)s."),
+         "Returning storage group %(sgName)s.",
{'sgName': sgName})
for storageGroupInstanceName in storageGroupInstanceNames:
instance = self.get_existing_instance(
@ -1001,9 +999,9 @@ class VMAXUtils(object):
poolInstanceName = self.get_pool_by_name(
conn, poolName, storageSystemName)
if poolInstanceName is None:
-     LOG.error(_LE(
+     LOG.error(
"Unable to retrieve pool instance of %(poolName)s on "
-         "array %(array)s."),
+         "array %(array)s.",
{'poolName': poolName, 'array': storageSystemName})
return (0, 0)
storagePoolInstance = conn.GetInstance(
@ -1241,7 +1239,7 @@ class VMAXUtils(object):
infoDetail = host.split('@')
storageSystem = 'SYMMETRIX+' + infoDetail[0]
except Exception:
-     LOG.error(_LE("Error parsing array from host capabilities."))
+     LOG.error("Error parsing array from host capabilities.")

return storageSystem

@ -1292,15 +1290,15 @@ class VMAXUtils(object):
if foundSyncInstanceName:
# Wait for SE_StorageSynchronized_SV_SV to be fully synced.
if waitforsync:
-     LOG.warning(_LW(
+     LOG.warning(
"Expect a performance hit as volume is not fully "
-         "synced on %(deviceId)s."),
+         "synced on %(deviceId)s.",
{'deviceId': volumeInstance['DeviceID']})
startTime = time.time()
self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs)
-     LOG.warning(_LW(
+     LOG.warning(
"Synchronization process took "
-         "took: %(delta)s H:MM:SS."),
+         "took: %(delta)s H:MM:SS.",
{'delta': self.get_time_delta(startTime,
time.time())})

@ -1336,9 +1334,9 @@ class VMAXUtils(object):
break

if foundSyncInstanceName is None:
-     LOG.warning(_LW(
+     LOG.warning(
"Group sync name not found for target group %(target)s "
-         "on %(storageSystem)s."),
+         "on %(storageSystem)s.",
{'target': targetRgInstanceName['InstanceID'],
'storageSystem': storageSystem})
else:
@ -1570,14 +1568,14 @@ class VMAXUtils(object):
break

if not isValidSLO:
-     LOG.error(_LE(
+     LOG.error(
"SLO: %(slo)s is not valid. Valid values are Bronze, Silver, "
-         "Gold, Platinum, Diamond, Optimized, NONE."), {'slo': slo})
+         "Gold, Platinum, Diamond, Optimized, NONE.", {'slo': slo})

if not isValidWorkload:
-     LOG.error(_LE(
+     LOG.error(
"Workload: %(workload)s is not valid. Valid values are "
-         "DSS_REP, DSS, OLTP, OLTP_REP, NONE."), {'workload': workload})
+         "DSS_REP, DSS, OLTP, OLTP_REP, NONE.", {'workload': workload})

return isValidSLO, isValidWorkload

@ -1641,8 +1639,8 @@ class VMAXUtils(object):
if len(metaHeads) > 0:
metaHeadInstanceName = metaHeads[0]
if metaHeadInstanceName is None:
-     LOG.info(_LI(
-         "Volume %(volume)s does not have meta device members."),
+     LOG.info(
+         "Volume %(volume)s does not have meta device members.",
{'volume': volumeInstanceName})

return metaHeadInstanceName
@ -1714,7 +1712,7 @@ class VMAXUtils(object):
instance = None
else:
# Something else that we cannot recover from has happened.
-     LOG.error(_LE("Exception: %s"), desc)
+     LOG.error("Exception: %s", desc)
exceptionMessage = (_(
"Cannot verify the existence of object:"
"%(instanceName)s.")
@ -1806,8 +1804,8 @@ class VMAXUtils(object):
{'initiator': initiator, 'rc': rc, 'ret': ret})
hardwareIdList = ret['HardwareID']
else:
-     LOG.warning(_LW("CreateStorageHardwareID failed. initiator: "
-         "%(initiator)s, rc=%(rc)d, ret=%(ret)s."),
+     LOG.warning("CreateStorageHardwareID failed. initiator: "
+         "%(initiator)s, rc=%(rc)d, ret=%(ret)s.",
{'initiator': initiator, 'rc': rc, 'ret': ret})
return hardwareIdList

@ -1826,7 +1824,7 @@ class VMAXUtils(object):
if 'iqn' in initiator.lower():
hardwareTypeId = 5
if hardwareTypeId == 0:
-     LOG.warning(_LW("Cannot determine the hardware type."))
+     LOG.warning("Cannot determine the hardware type.")
return hardwareTypeId

def _process_tag(self, element, tagName):
@ -1976,15 +1974,15 @@ class VMAXUtils(object):
portGroup = self._get_random_portgroup(dom)
serialNumber = self._process_tag(dom, 'Array')
if serialNumber is None:
-     LOG.error(_LE(
+     LOG.error(
"Array Serial Number must be in the file "
-         "%(fileName)s."),
+         "%(fileName)s.",
{'fileName': fileName})
poolName = self._process_tag(dom, 'Pool')
if poolName is None:
-     LOG.error(_LE(
+     LOG.error(
"PoolName must be in the file "
-         "%(fileName)s."),
+         "%(fileName)s.",
{'fileName': fileName})
kwargs = self._fill_record(
connargs, serialNumber, poolName, portGroup, dom)
@ -2024,9 +2022,8 @@ class VMAXUtils(object):
% {'poolName': arrayInfoRec['PoolName'],
'array': arrayInfoRec['SerialNumber']})
if compString == pool:
-     LOG.info(_LI(
-         "The pool_name from extraSpecs is %(pool)s."),
+     LOG.info("The pool_name from extraSpecs is %(pool)s.",
{'pool': pool})
foundArrayInfoRec = arrayInfoRec
break
else:
@ -2284,9 +2281,9 @@ class VMAXUtils(object):
break

if foundSyncInstanceName is None:
-     LOG.info(_LI(
+     LOG.info(
"No replication synchronization session found associated "
-         "with source volume %(source)s on %(storageSystem)s."),
+         "with source volume %(source)s on %(storageSystem)s.",
{'source': sourceDeviceId, 'storageSystem': storageSystem})

return foundSyncInstanceName
@ -2301,16 +2298,13 @@ class VMAXUtils(object):
:returns: volume_model_updates - updated volumes
"""
volume_model_updates = []
-     LOG.info(_LI(
-         "Updating status for CG: %(id)s."),
-         {'id': cgId})
+     LOG.info("Updating status for CG: %(id)s.", {'id': cgId})
if volumes:
for volume in volumes:
volume_model_updates.append({'id': volume['id'],
'status': status})
else:
-     LOG.info(_LI("No volume found for CG: %(cg)s."),
-         {'cg': cgId})
+     LOG.info("No volume found for CG: %(cg)s.", {'cg': cgId})
return volume_model_updates

def get_smi_version(self, conn):
@ -2612,7 +2606,7 @@ class VMAXUtils(object):
try:
max_subscription_percent_int = int(max_subscription_percent)
except ValueError:
-     LOG.error(_LE("Cannot convert max subscription percent to int."))
+     LOG.error("Cannot convert max subscription percent to int.")
return None
return float(max_subscription_percent_int) / 100

@ -2969,14 +2963,14 @@ class VMAXUtils(object):
if foundSyncInstanceName:
# Wait for SE_StorageSynchronized_SV_SV to be fully synced.
if waitforsync:
-     LOG.warning(_LW(
+     LOG.warning(
"Expect a performance hit as volume is not not fully "
-         "synced on %(deviceId)s."),
+         "synced on %(deviceId)s.",
{'deviceId': sourceInstance['DeviceID']})
startTime = time.time()
self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs)
-     LOG.warning(_LW(
-         "Synchronization process took: %(delta)s H:MM:SS."),
+     LOG.warning(
+         "Synchronization process took: %(delta)s H:MM:SS.",
{'delta': self.get_time_delta(startTime,
time.time())})

@ -3011,8 +3005,8 @@ class VMAXUtils(object):
extraSpecs[self.POOL] = poolDetails[2]
extraSpecs[self.ARRAY] = poolDetails[3]
except KeyError:
-     LOG.error(_LE("Error parsing SLO, workload from "
-         "the provided extra_specs."))
+     LOG.error("Error parsing SLO, workload from "
+         "the provided extra_specs.")
return extraSpecs

def get_default_intervals_retries(self):
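Editor's note (illustrative, not part of the commit): only log calls drop the markers; text that can surface to users through an exception still goes through _(), which is what the "# This may be used in exception hence the use of _." comments above refer to. A hedged sketch of that split, assuming the cinder tree is importable and using placeholder names:

    import logging

    from cinder import exception
    from cinder.i18n import _  # translation kept only for user-facing text

    LOG = logging.getLogger(__name__)

    def create_masking_view(masking_view_name):
        # Log-only message: plain string, parameters passed separately.
        LOG.debug("Creating masking view %(mv)s.", {'mv': masking_view_name})
        # Message that may be raised to the caller: translated, then reused.
        msg = _("Cannot create masking view: %(maskingViewName)s. ") % {
            'maskingViewName': masking_view_name}
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)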
@ -27,7 +27,7 @@ if storops:
from storops import exception as storops_ex

from cinder import exception
- from cinder.i18n import _, _LI, _LE, _LW
+ from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
@ -96,9 +96,9 @@ class CommonAdapter(object):
# empty string.
naviseccli_path = self.config.naviseccli_path
if naviseccli_path is None or len(naviseccli_path.strip()) == 0:
-     LOG.warning(_LW('[%(group)s] naviseccli_path is not set or set to '
+     LOG.warning('[%(group)s] naviseccli_path is not set or set to '
'an empty string. None will be passed into '
-         'storops.'), {'group': self.config.config_group})
+         'storops.', {'group': self.config.config_group})
self.config.naviseccli_path = None

# Check option `storage_vnx_pool_names`.
@ -133,32 +133,32 @@ class CommonAdapter(object):
self.config.io_port_list = io_port_list

if self.config.ignore_pool_full_threshold:
-     LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. '
+     LOG.warning('[%(group)s] ignore_pool_full_threshold: True. '
'LUN creation will still be forced even if the '
-         'pool full threshold is exceeded.'),
+         'pool full threshold is exceeded.',
{'group': self.config.config_group})

if self.config.destroy_empty_storage_group:
-     LOG.warning(_LW('[%(group)s] destroy_empty_storage_group: True. '
+     LOG.warning('[%(group)s] destroy_empty_storage_group: True. '
'Empty storage group will be deleted after volume '
-         'is detached.'),
+         'is detached.',
{'group': self.config.config_group})

if not self.config.initiator_auto_registration:
-     LOG.info(_LI('[%(group)s] initiator_auto_registration: False. '
+     LOG.info('[%(group)s] initiator_auto_registration: False. '
'Initiator auto registration is not enabled. '
-         'Please register initiator manually.'),
+         'Please register initiator manually.',
{'group': self.config.config_group})

if self.config.force_delete_lun_in_storagegroup:
-     LOG.warning(_LW(
-         '[%(group)s] force_delete_lun_in_storagegroup=True'),
+     LOG.warning(
+         '[%(group)s] force_delete_lun_in_storagegroup=True',
{'group': self.config.config_group})

if self.config.ignore_pool_full_threshold:
-     LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. '
+     LOG.warning('[%(group)s] ignore_pool_full_threshold: True. '
'LUN creation will still be forced even if the '
-         'pool full threshold is exceeded.'),
+         'pool full threshold is exceeded.',
{'group': self.config.config_group})

def _build_port_str(self, port):
@ -217,10 +217,10 @@ class CommonAdapter(object):
tier = specs.tier

volume_metadata['snapcopy'] = 'False'
-     LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
+     LOG.info('Create Volume: %(volume)s Size: %(size)s '
'pool: %(pool)s '
'provision: %(provision)s '
-         'tier: %(tier)s '),
+         'tier: %(tier)s ',
{'volume': volume_name,
'size': volume_size,
'pool': pool,
@ -463,7 +463,7 @@ class CommonAdapter(object):
model_update = {}
volumes_model_update = []
model_update['status'] = group.status
-     LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
+     LOG.info('Start to delete consistency group: %(cg_name)s',
{'cg_name': cg_name})

self.client.delete_consistency_group(cg_name)
@ -491,8 +491,8 @@ class CommonAdapter(object):
def do_create_cgsnap(self, group_name, snap_name, snapshots):
model_update = {}
snapshots_model_update = []
-     LOG.info(_LI('Creating consistency snapshot for group'
-         ': %(group_name)s'),
+     LOG.info('Creating consistency snapshot for group'
+         ': %(group_name)s',
{'group_name': group_name})

self.client.create_cg_snapshot(snap_name,
@ -516,8 +516,8 @@ class CommonAdapter(object):
model_update = {}
snapshots_model_update = []
model_update['status'] = snap_status
-     LOG.info(_LI('Deleting consistency snapshot %(snap_name)s for '
-         'group: %(group_name)s'),
+     LOG.info('Deleting consistency snapshot %(snap_name)s for '
+         'group: %(group_name)s',
{'snap_name': snap_name,
'group_name': group_name})

@ -640,10 +640,10 @@ class CommonAdapter(object):
'Non-existent pools: %s') % ','.join(nonexistent_pools)
raise exception.VolumeBackendAPIException(data=msg)
if nonexistent_pools:
-     LOG.warning(_LW('The following specified storage pools '
+     LOG.warning('The following specified storage pools '
'do not exist: %(nonexistent)s. '
'This host will only manage the storage '
-         'pools: %(exist)s'),
+         'pools: %(exist)s',
{'nonexistent': ','.join(nonexistent_pools),
'exist': ','.join(pool_names)})
else:
@ -651,8 +651,8 @@ class CommonAdapter(object):
','.join(pool_names))
else:
pool_names = [p.name for p in array_pools]
-     LOG.info(_LI('No storage pool is configured. This host will '
-         'manage all the pools on the VNX system.'))
+     LOG.info('No storage pool is configured. This host will '
+         'manage all the pools on the VNX system.')

return [pool for pool in array_pools if pool.name in pool_names]

@ -684,7 +684,7 @@ class CommonAdapter(object):
# or Deleting.
if pool.state in common.PoolState.VALID_CREATE_LUN_STATE:
pool_stats['free_capacity_gb'] = 0
-     LOG.warning(_LW('Storage Pool [%(pool)s] is [%(state)s].'),
+     LOG.warning('Storage Pool [%(pool)s] is [%(state)s].',
{'pool': pool.name,
'state': pool.state})
else:
@ -692,9 +692,9 @@ class CommonAdapter(object):

if (pool_feature.max_pool_luns <=
pool_feature.total_pool_luns):
-     LOG.warning(_LW('Maximum number of Pool LUNs %(max_luns)s '
+     LOG.warning('Maximum number of Pool LUNs %(max_luns)s '
'have been created for %(pool_name)s. '
-         'No more LUN creation can be done.'),
+         'No more LUN creation can be done.',
{'max_luns': pool_feature.max_pool_luns,
'pool_name': pool.name})
pool_stats['free_capacity_gb'] = 0
@ -1018,15 +1018,14 @@ class CommonAdapter(object):
lun = self.client.get_lun(lun_id=volume.vnx_lun_id)
hostname = host.name
if not sg.existed:
-     LOG.warning(_LW("Storage Group %s is not found. "
-         "Nothing can be done in terminate_connection()."),
+     LOG.warning("Storage Group %s is not found. "
+         "Nothing can be done in terminate_connection().",
hostname)
else:
try:
sg.detach_alu(lun)
except storops_ex.VNXDetachAluNotFoundError:
-     LOG.warning(_LW("Volume %(vol)s is not in Storage Group"
-         " %(sg)s."),
+     LOG.warning("Volume %(vol)s is not in Storage Group %(sg)s.",
{'vol': volume.name, 'sg': hostname})

def build_terminate_connection_return_data(self, host, sg):
@ -1042,19 +1041,19 @@ class CommonAdapter(object):

def _destroy_empty_sg(self, host, sg):
try:
-     LOG.info(_LI("Storage Group %s is empty."), sg.name)
+     LOG.info("Storage Group %s is empty.", sg.name)
sg.disconnect_host(sg.name)
sg.delete()
if self.itor_auto_dereg:
self._deregister_initiator(host)
except storops_ex.StoropsException:
-     LOG.warning(_LW("Failed to destroy Storage Group %s."),
+     LOG.warning("Failed to destroy Storage Group %s.",
sg.name)
try:
sg.connect_host(sg.name)
except storops_ex.StoropsException:
-     LOG.warning(_LW("Failed to connect host %(host)s "
-         "back to storage group %(sg)s."),
+     LOG.warning("Failed to connect host %(host)s "
+         "back to storage group %(sg)s.",
{'host': sg.name, 'sg': sg.name})

def _deregister_initiator(self, host):
@ -1062,7 +1061,7 @@ class CommonAdapter(object):
try:
self.client.deregister_initiators(initiators)
except storops_ex:
-     LOG.warning(_LW("Failed to deregister the initiators %s"),
+     LOG.warning("Failed to deregister the initiators %s",
initiators)

def _is_allowed_port(self, port):
@ -1138,7 +1137,7 @@ class CommonAdapter(object):
volume.name, lun_size,
provision, tier)

-     LOG.info(_LI('Successfully setup replication for %s.'), volume.id)
+     LOG.info('Successfully setup replication for %s.', volume.id)
rep_update.update({'replication_status':
fields.ReplicationStatus.ENABLED})
return rep_update
@ -1152,7 +1151,7 @@ class CommonAdapter(object):
mirror_view = self.build_mirror_view(self.config, True)
mirror_view.destroy_mirror(mirror_name, volume.name)
LOG.info(
-     _LI('Successfully destroyed replication for volume: %s'),
+     'Successfully destroyed replication for volume: %s',
volume.id)

def build_mirror_view(self, configuration, failover=True):
@ -1164,7 +1163,7 @@ class CommonAdapter(object):
"""
rep_devices = configuration.replication_device
if not rep_devices:
-     LOG.info(_LI('Replication is not configured on backend: %s.'),
+     LOG.info('Replication is not configured on backend: %s.',
configuration.config_group)
return None
elif len(rep_devices) == 1:
@ -1225,12 +1224,12 @@ class CommonAdapter(object):
try:
mirror_view.promote_image(mirror_name)
except storops_ex.VNXMirrorException as ex:
-     msg = _LE(
+     LOG.error(
'Failed to failover volume %(volume_id)s '
-         'to %(target)s: %(error)s.')
+         'to %(target)s: %(error)s.',
-     LOG.error(msg, {'volume_id': volume.id,
+         {'volume_id': volume.id,
'target': secondary_backend_id,
-         'error': ex},)
+         'error': ex})
new_status = fields.ReplicationStatus.ERROR
else:
# Transfer ownership to secondary_backend_id and
@ -1354,8 +1353,7 @@ class ISCSIAdapter(CommonAdapter):
raise exception.InvalidConfigurationValue(
option=option,
value=iscsi_initiators)
-     LOG.info(_LI("[%(group)s] iscsi_initiators is configured: "
-         "%(value)s"),
+     LOG.info("[%(group)s] iscsi_initiators is configured: %(value)s",
{'group': self.config.config_group,
'value': self.config.iscsi_initiators})

@@ -22,7 +22,7 @@ if storops:
     from storops.lib import tasks as storops_tasks

 from cinder import exception
-from cinder.i18n import _, _LW, _LE, _LI
+from cinder.i18n import _
 from cinder import utils as cinder_utils
 from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.dell_emc.vnx import const
@@ -95,7 +95,7 @@ class Client(object):
         if queue_path:
             self.queue = storops_tasks.PQueue(path=queue_path)
             self.queue.start()
-            LOG.info(_LI('PQueue[%s] starts now.'), queue_path)
+            LOG.info('PQueue[%s] starts now.', queue_path)

     def create_lun(self, pool, name, size, provision,
                    tier, cg_id=None, ignore_thresholds=False):
@@ -143,8 +143,8 @@ class Client(object):
                 if smp_attached_snap:
                     smp_attached_snap.delete()
         except storops_ex.VNXLunNotFoundError as ex:
-            LOG.info(_LI("LUN %(name)s is already deleted. This message can "
-                         "be safely ignored. Message: %(msg)s"),
+            LOG.info("LUN %(name)s is already deleted. This message can "
+                     "be safely ignored. Message: %(msg)s",
                      {'name': name, 'msg': ex.message})

     def cleanup_async_lun(self, name, force=False):
@@ -160,8 +160,8 @@ class Client(object):
     def delay_delete_lun(self, name):
         """Delay the deletion by putting it in a storops queue."""
         self.queue.put(self.vnx.delete_lun, name=name)
-        LOG.info(_LI("VNX object has been added to queue for later"
-                     " deletion: %s"), name)
+        LOG.info("VNX object has been added to queue for later"
+                 " deletion: %s", name)

     @cinder_utils.retry(const.VNXLunPreparingError, retries=1,
                         backoff_rate=1)
@@ -173,8 +173,8 @@ class Client(object):
             lun.poll = poll
             lun.expand(new_size, ignore_thresholds=True)
         except storops_ex.VNXLunExpandSizeError as ex:
-            LOG.warning(_LW("LUN %(name)s is already expanded. "
-                            "Message: %(msg)s."),
+            LOG.warning("LUN %(name)s is already expanded. "
+                        "Message: %(msg)s.",
                         {'name': name, 'msg': ex.message})

         except storops_ex.VNXLunPreparingError as ex:
@@ -182,8 +182,7 @@ class Client(object):
             # is 'Preparing'. Wait for a while so that the LUN may get out of
             # the transitioning state.
             with excutils.save_and_reraise_exception():
-                LOG.warning(_LW("LUN %(name)s is not ready for extension: "
-                                "%(msg)s"),
+                LOG.warning("LUN %(name)s is not ready for extension: %(msg)s",
                             {'name': name, 'msg': ex.message})

                 utils.wait_until(Condition.is_lun_ops_ready, lun=lun)
@@ -206,7 +205,7 @@ class Client(object):
         if not session.existed:
             return True
         elif session.current_state in ('FAULTED', 'STOPPED'):
-            LOG.warning(_LW('Session is %s, need to handled then.'),
+            LOG.warning('Session is %s, need to handled then.',
                         session.current_state)
             return True
         else:
@@ -243,15 +242,15 @@ class Client(object):
         session = self.vnx.get_migration_session(src_id)
         src_lun = self.vnx.get_lun(lun_id=src_id)
         if session.existed:
-            LOG.warning(_LW('Cancelling migration session: '
-                            '%(src_id)s -> %(dst_id)s.'),
+            LOG.warning('Cancelling migration session: '
+                        '%(src_id)s -> %(dst_id)s.',
                         {'src_id': src_id,
                          'dst_id': dst_id})
             try:
                 src_lun.cancel_migrate()
             except storops_ex.VNXLunNotMigratingError:
-                LOG.info(_LI('The LUN is not migrating or completed, '
-                             'this message can be safely ignored'))
+                LOG.info('The LUN is not migrating or completed, '
+                         'this message can be safely ignored')
             except (storops_ex.VNXLunSyncCompletedError,
                     storops_ex.VNXMigrationError):
                 # Wait until session finishes
@@ -266,8 +265,8 @@ class Client(object):
                 snap_name, allow_rw=True, auto_delete=False,
                 keep_for=keep_for)
         except storops_ex.VNXSnapNameInUseError as ex:
-            LOG.warning(_LW('Snapshot %(name)s already exists. '
-                            'Message: %(msg)s'),
+            LOG.warning('Snapshot %(name)s already exists. '
+                        'Message: %(msg)s',
                         {'name': snap_name, 'msg': ex.message})

     def delete_snapshot(self, snapshot_name):
@@ -277,13 +276,13 @@ class Client(object):
         try:
             snap.delete()
         except storops_ex.VNXSnapNotExistsError as ex:
-            LOG.warning(_LW("Snapshot %(name)s may be deleted already. "
-                            "Message: %(msg)s"),
+            LOG.warning("Snapshot %(name)s may be deleted already. "
+                        "Message: %(msg)s",
                         {'name': snapshot_name, 'msg': ex.message})
         except storops_ex.VNXDeleteAttachedSnapError as ex:
             with excutils.save_and_reraise_exception():
-                LOG.warning(_LW("Failed to delete snapshot %(name)s "
-                                "which is in use. Message: %(msg)s"),
+                LOG.warning("Failed to delete snapshot %(name)s "
+                            "which is in use. Message: %(msg)s",
                             {'name': snapshot_name, 'msg': ex.message})

     def copy_snapshot(self, snap_name, new_snap_name):
@@ -295,8 +294,8 @@ class Client(object):
         try:
             return lun.create_mount_point(name=smp_name)
         except storops_ex.VNXLunNameInUseError as ex:
-            LOG.warning(_LW('Mount point %(name)s already exists. '
-                            'Message: %(msg)s'),
+            LOG.warning('Mount point %(name)s already exists. '
+                        'Message: %(msg)s',
                         {'name': smp_name, 'msg': ex.message})
             # Ignore the failure that due to retry.
             return self.vnx.get_lun(name=smp_name)
@@ -306,9 +305,9 @@ class Client(object):
         try:
             lun.attach_snap(snap=snap_name)
         except storops_ex.VNXSnapAlreadyMountedError as ex:
-            LOG.warning(_LW("Snapshot %(snap_name)s is attached to "
+            LOG.warning("Snapshot %(snap_name)s is attached to "
                         "snapshot mount point %(smp_name)s already. "
-                        "Message: %(msg)s"),
+                        "Message: %(msg)s",
                         {'snap_name': snap_name,
                          'smp_name': smp_name,
                          'msg': ex.message})
@@ -318,8 +317,8 @@ class Client(object):
         try:
             lun.detach_snap()
         except storops_ex.VNXSnapNotAttachedError as ex:
-            LOG.warning(_LW("Snapshot mount point %(smp_name)s is not "
-                            "currently attached. Message: %(msg)s"),
+            LOG.warning("Snapshot mount point %(smp_name)s is not "
+                        "currently attached. Message: %(msg)s",
                         {'smp_name': smp_name, 'msg': ex.message})

     def modify_snapshot(self, snap_name, allow_rw=None,
@@ -417,7 +416,7 @@ class Client(object):
         try:
             lun.enable_compression(ignore_thresholds=True)
         except storops_ex.VNXCompressionAlreadyEnabledError:
-            LOG.warning(_LW("Compression has already been enabled on %s."),
+            LOG.warning("Compression has already been enabled on %s.",
                         lun.name)

     def get_vnx_enabler_status(self):
@@ -433,8 +432,8 @@ class Client(object):
             self.sg_cache[name] = self.vnx.create_sg(name)
         except storops_ex.VNXStorageGroupNameInUseError as ex:
             # Ignore the failure due to retry
-            LOG.warning(_LW('Storage group %(name)s already exists. '
-                            'Message: %(msg)s'),
+            LOG.warning('Storage group %(name)s already exists. '
+                        'Message: %(msg)s',
                         {'name': name, 'msg': ex.message})
             self.sg_cache[name] = self.vnx.get_sg(name=name)

@@ -469,8 +468,8 @@ class Client(object):
             storage_group.connect_hba(port, initiator_id, host.name,
                                       host_ip=host.ip)
         except storops_ex.VNXStorageGroupError as ex:
-            LOG.warning(_LW('Failed to set path to port %(port)s for '
-                            'initiator %(hba_id)s. Message: %(msg)s'),
+            LOG.warning('Failed to set path to port %(port)s for '
+                        'initiator %(hba_id)s. Message: %(msg)s',
                         {'port': port, 'hba_id': initiator_id,
                          'msg': ex.message})

@@ -499,9 +498,9 @@ class Client(object):
         except storops_ex.VNXNoHluAvailableError as ex:
             with excutils.save_and_reraise_exception():
                 # Reach the max times of retry, fail the attach action.
-                LOG.error(_LE('Failed to add %(lun)s into %(sg)s after '
+                LOG.error('Failed to add %(lun)s into %(sg)s after '
                           '%(tried)s tries. Reach the max retry times. '
-                          'Message: %(msg)s'),
+                          'Message: %(msg)s',
                           {'lun': lun.lun_id, 'sg': storage_group.name,
                            'tried': max_retries, 'msg': ex.message})

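Note that only the log-marker imports disappear; every touched module keeps from cinder.i18n import _, because messages raised back to API users are still translated. A short sketch of the distinction, adapted from hunks elsewhere in this diff (the surrounding names are illustrative only):

    from cinder import exception
    from cinder.i18n import _  # still needed for user-facing messages

    # Log messages: plain, untranslated strings.
    LOG.warning('Storage group %(name)s already exists. Message: %(msg)s',
                {'name': name, 'msg': ex.message})

    # Exception messages returned to the user: still wrapped in _().
    raise exception.VolumeBackendAPIException(
        data=_('Volume by this name already exists'))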
@@ -23,7 +23,7 @@ from oslo_utils import importutils
 storops = importutils.try_import('storops')

 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 from cinder.volume.drivers.dell_emc.vnx import const
 from cinder.volume import volume_types

@@ -201,9 +201,9 @@ class ExtraSpecs(object):
         :param enabler_status: Instance of VNXEnablerStatus
         """
         if "storagetype:pool" in self.specs:
-            LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsoleted "
+            LOG.warning("Extra spec key 'storagetype:pool' is obsoleted "
                         "since driver version 5.1.0. This key will be "
-                        "ignored."))
+                        "ignored.")

         if (self._provision == storops.VNXProvisionEnum.DEDUPED and
                 self._tier is not None):
@@ -417,7 +417,7 @@ class ReplicationDeviceList(list):
             device = self._device_map[backend_id]
         except KeyError:
             device = None
-            LOG.warning(_LW('Unable to find secondary device named: %s'),
+            LOG.warning('Unable to find secondary device named: %s',
                         backend_id)
         return device

@@ -483,7 +483,7 @@ class VNXMirrorView(object):
         mv = self.primary_client.get_mirror(mirror_name)
         if not mv.existed:
             # We will skip the mirror operations if not existed
-            LOG.warning(_LW('Mirror view %s was deleted already.'),
+            LOG.warning('Mirror view %s was deleted already.',
                         mirror_name)
             return
         self.fracture_image(mirror_name)
@@ -24,10 +24,10 @@ from taskflow import task
 from taskflow.types import failure

 from cinder import exception
+from cinder.i18n import _
 from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.dell_emc.vnx import const
 from cinder.volume.drivers.dell_emc.vnx import utils
-from cinder.i18n import _, _LI, _LW

 LOG = logging.getLogger(__name__)

@@ -60,8 +60,8 @@ class MigrateLunTask(task.Task):

     def revert(self, result, client, src_id, dst_id, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method)s: cleanup migration session: '
-                        '%(src_id)s -> %(dst_id)s.'),
+        LOG.warning('%(method)s: cleanup migration session: '
+                    '%(src_id)s -> %(dst_id)s.',
                     {'method': method_name,
                      'src_id': src_id,
                      'dst_id': dst_id})
@@ -98,7 +98,7 @@ class CreateLunTask(task.Task):
         if isinstance(result, failure.Failure):
             return
         else:
-            LOG.warning(_LW('%(method_name)s: delete lun %(lun_name)s'),
+            LOG.warning('%(method_name)s: delete lun %(lun_name)s',
                         {'method_name': method_name, 'lun_name': lun_name})
             client.delete_lun(lun_name)

@@ -117,9 +117,9 @@ class CopySnapshotTask(task.Task):
     def revert(self, result, client, snap_name, new_snap_name,
                *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method_name)s: delete the '
+        LOG.warning('%(method_name)s: delete the '
                     'copied snapshot %(new_name)s of '
-                    '%(source_name)s.'),
+                    '%(source_name)s.',
                     {'method_name': method_name,
                      'new_name': new_snap_name,
                      'source_name': snap_name})
@@ -146,7 +146,7 @@ class CreateSMPTask(task.Task):

     def revert(self, result, client, smp_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method_name)s: delete mount point %(name)s'),
+        LOG.warning('%(method_name)s: delete mount point %(name)s',
                     {'method_name': method_name,
                      'name': smp_name})
         client.delete_lun(smp_name)
@@ -164,7 +164,7 @@ class AttachSnapTask(task.Task):

     def revert(self, result, client, smp_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method_name)s: detach mount point %(smp_name)s'),
+        LOG.warning('%(method_name)s: detach mount point %(smp_name)s',
                     {'method_name': method_name,
                      'smp_name': smp_name})
         client.detach_snapshot(smp_name)
@@ -178,15 +178,15 @@ class CreateSnapshotTask(task.Task):
     def execute(self, client, snap_name, lun_id, keep_for=None,
                 *args, **kwargs):
         LOG.debug('%s.execute', self.__class__.__name__)
-        LOG.info(_LI('Create snapshot: %(snapshot)s: lun: %(lun)s'),
+        LOG.info('Create snapshot: %(snapshot)s: lun: %(lun)s',
                  {'snapshot': snap_name,
                   'lun': lun_id})
         client.create_snapshot(lun_id, snap_name, keep_for=keep_for)

     def revert(self, result, client, snap_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method_name)s: '
-                        'delete temp snapshot %(snap_name)s'),
+        LOG.warning('%(method_name)s: '
+                    'delete temp snapshot %(snap_name)s',
                     {'method_name': method_name,
                      'snap_name': snap_name})
         client.delete_snapshot(snap_name)
@@ -201,8 +201,8 @@ class ModifySnapshotTask(task.Task):

     def revert(self, result, client, snap_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method_name)s: '
-                        'setting snapshot %(snap_name)s to read-only.'),
+        LOG.warning('%(method_name)s: '
+                    'setting snapshot %(snap_name)s to read-only.',
                     {'method_name': method_name,
                      'snap_name': snap_name})
         client.modify_snapshot(snap_name, allow_rw=False)
@@ -268,8 +268,8 @@ class CreateCGSnapshotTask(task.Task):

     def revert(self, client, cg_snap_name, cg_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method_name)s: '
-                        'deleting CG snapshot %(snap_name)s.'),
+        LOG.warning('%(method_name)s: '
+                    'deleting CG snapshot %(snap_name)s.',
                     {'method_name': method_name,
                      'snap_name': cg_snap_name})
         client.delete_cg_snapshot(cg_snap_name)
@@ -288,8 +288,8 @@ class CreateMirrorTask(task.Task):
     def revert(self, result, mirror, mirror_name,
                *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method)s: removing mirror '
-                        'view %(name)s.'),
+        LOG.warning('%(method)s: removing mirror '
+                    'view %(name)s.',
                     {'method': method_name,
                      'name': mirror_name})
         mirror.delete_mirror(mirror_name)
@@ -308,8 +308,8 @@ class AddMirrorImageTask(task.Task):
     def revert(self, result, mirror, mirror_name,
                *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
-        LOG.warning(_LW('%(method)s: removing secondary image '
-                        'from %(name)s.'),
+        LOG.warning('%(method)s: removing secondary image '
+                    'from %(name)s.',
                     {'method': method_name,
                      'name': mirror_name})
         mirror.remove_image(mirror_name)
@@ -24,7 +24,7 @@ from oslo_utils import importutils
 storops = importutils.try_import('storops')

 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.san.san import san_opts
 from cinder.volume import utils as vol_utils
@@ -139,17 +139,17 @@ def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,

 def validate_storage_migration(volume, target_host, src_serial, src_protocol):
     if 'location_info' not in target_host['capabilities']:
-        LOG.warning(_LW("Failed to get pool name and "
+        LOG.warning("Failed to get pool name and "
                     "serial number. 'location_info' "
-                    "from %s."), target_host['host'])
+                    "from %s.", target_host['host'])
         return False
     info = target_host['capabilities']['location_info']
     LOG.debug("Host for migration is %s.", info)
     try:
         serial_number = info.split('|')[1]
     except AttributeError:
-        LOG.warning(_LW('Error on getting serial number '
-                        'from %s.'), target_host['host'])
+        LOG.warning('Error on getting serial number '
+                    'from %s.', target_host['host'])
         return False
     if serial_number != src_serial:
         LOG.debug('Skip storage-assisted migration because '
@@ -253,8 +253,8 @@ def get_migration_rate(volume):
     if rate.lower() in storops.VNXMigrationRate.values():
         return storops.VNXMigrationRate.parse(rate.lower())
     else:
-        LOG.warning(_LW('Unknown migration rate specified, '
-                        'using [high] as migration rate.'))
+        LOG.warning('Unknown migration rate specified, '
+                    'using [high] as migration rate.')

         return storops.VNXMigrationRate.HIGH

@@ -44,7 +44,7 @@ import six

 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import interface
 from cinder.objects import fields
 from cinder import utils
@@ -156,18 +156,18 @@ class XtremIOClient(object):
             error = response.json()
             err_msg = error.get('message')
             if err_msg.endswith(OBJ_NOT_FOUND_ERR):
-                LOG.warning(_LW("object %(key)s of "
-                                "type %(typ)s not found, %(err_msg)s"),
+                LOG.warning("object %(key)s of "
+                            "type %(typ)s not found, %(err_msg)s",
                             {'key': key, 'typ': object_type,
                              'err_msg': err_msg, })
                 raise exception.NotFound()
             elif err_msg == VOL_NOT_UNIQUE_ERR:
-                LOG.error(_LE("can't create 2 volumes with the same name, %s"),
+                LOG.error("can't create 2 volumes with the same name, %s",
                           err_msg)
-                msg = (_('Volume by this name already exists'))
+                msg = _('Volume by this name already exists')
                 raise exception.VolumeBackendAPIException(data=msg)
             elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
-                LOG.error(_LE("Can't find volume to map %(key)s, %(msg)s"),
+                LOG.error("Can't find volume to map %(key)s, %(msg)s",
                           {'key': key, 'msg': err_msg, })
                 raise exception.VolumeNotFound(volume_id=key)
             elif ALREADY_MAPPED_ERR in err_msg:
@@ -338,8 +338,7 @@ class XtremIOClient4(XtremIOClient):
             self.req(typ, 'PUT', data, idx=int(idx))
         except exception.VolumeBackendAPIException:
             # reverting
-            msg = _LE('Failed to rename the created snapshot, reverting.')
-            LOG.error(msg)
+            LOG.error('Failed to rename the created snapshot, reverting.')
             self.req(typ, 'DELETE', idx=int(idx))
             raise

@@ -404,7 +403,7 @@ class XtremIOVolumeDriver(san.SanDriver):
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
         else:
-            LOG.info(_LI('XtremIO SW version %s'), version_text)
+            LOG.info('XtremIO SW version %s', version_text)
         if ver[0] >= 4:
             self.client = XtremIOClient4(self.configuration, self.cluster_id)

@@ -466,8 +465,8 @@ class XtremIOVolumeDriver(san.SanDriver):
             try:
                 self.extend_volume(volume, volume['size'])
             except Exception:
-                LOG.error(_LE('failes to extend volume %s, '
-                              'reverting clone operation'), volume['id'])
+                LOG.error('failes to extend volume %s, '
+                          'reverting clone operation', volume['id'])
                 # remove the volume in case resize failed
                 self.delete_volume(volume)
                 raise
@@ -481,7 +480,7 @@ class XtremIOVolumeDriver(san.SanDriver):
         try:
             self.client.req('volumes', 'DELETE', name=volume.name_id)
         except exception.NotFound:
-            LOG.info(_LI("volume %s doesn't exist"), volume.name_id)
+            LOG.info("volume %s doesn't exist", volume.name_id)

     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
@@ -492,7 +491,7 @@ class XtremIOVolumeDriver(san.SanDriver):
         try:
             self.client.req('volumes', 'DELETE', name=snapshot.id)
         except exception.NotFound:
-            LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
+            LOG.info("snapshot %s doesn't exist", snapshot.id)

     def update_migrated_volume(self, ctxt, volume, new_volume,
                                original_volume_status):
@@ -505,8 +504,8 @@ class XtremIOVolumeDriver(san.SanDriver):
             data = {'name': original_name}
             self.client.req('volumes', 'PUT', data, name=current_name)
         except exception.VolumeBackendAPIException:
-            LOG.error(_LE('Unable to rename the logical volume '
-                          'for volume: %s'), original_name)
+            LOG.error('Unable to rename the logical volume '
+                      'for volume: %s', original_name)
             # If the rename fails, _name_id should be set to the new
             # volume id and provider_location should be set to the
             # one from the new volume as well.
@@ -603,8 +602,8 @@ class XtremIOVolumeDriver(san.SanDriver):
             self.client.req('volumes', 'PUT', name=volume['id'],
                             data={'vol-name': volume['name'] + '-unmanged'})
         except exception.NotFound:
-            LOG.info(_LI("%(typ)s with the name %(name)s wasn't found, "
-                         "can't unmanage") %
+            LOG.info("%(typ)s with the name %(name)s wasn't found, "
+                     "can't unmanage",
                      {'typ': 'Snapshot' if is_snapshot else 'Volume',
                       'name': volume['id']})
             raise exception.VolumeNotFound(volume_id=volume['id'])
@@ -644,7 +643,7 @@ class XtremIOVolumeDriver(san.SanDriver):
         try:
             self.client.req('lun-maps', 'DELETE', name=lm_name)
         except exception.NotFound:
-            LOG.warning(_LW("terminate_connection: lun map not found"))
+            LOG.warning("terminate_connection: lun map not found")

     def _get_password(self):
         return ''.join(RANDOM.choice
@@ -659,9 +658,9 @@ class XtremIOVolumeDriver(san.SanDriver):
             res = self.client.req('lun-maps', 'POST', data)

             lunmap = self._obj_from_result(res)
-            LOG.info(_LI('Created lun-map:\n%s'), lunmap)
+            LOG.info('Created lun-map:\n%s', lunmap)
         except exception.XtremIOAlreadyMappedError:
-            LOG.info(_LI('Volume already mapped, retrieving %(ig)s, %(vol)s'),
+            LOG.info('Volume already mapped, retrieving %(ig)s, %(vol)s',
                      {'ig': ig, 'vol': volume['id']})
             lunmap = self.client.find_lunmap(ig, volume['id'])
         return lunmap
@@ -993,8 +992,8 @@ class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
                                               discovery_chap)
         # if CHAP was enabled after the initiator was created
         if login_chap and not login_passwd:
-            LOG.info(_LI('initiator has no password while using chap,'
-                         'adding it'))
+            LOG.info('Initiator has no password while using chap, adding it.')
             data = {}
             (login_passwd,
              d_passwd) = self._add_auth(data, login_chap, discovery_chap and
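One hunk above is more than a marker removal: the old unmanage message was formatted eagerly with % before reaching the logger, while the new code passes the arguments separately so the string is only built if the record is actually emitted. A simplified sketch of the difference (the message is shortened here for illustration):

    # Eager: the string is interpolated even when INFO logging is disabled.
    LOG.info("%(name)s wasn't found, can't unmanage" % {'name': volume['id']})

    # Lazy: the logging module interpolates only when the record is emitted.
    LOG.info("%(name)s wasn't found, can't unmanage", {'name': volume['id']})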
@@ -26,7 +26,7 @@ import requests
 import six

 from cinder import exception
-from cinder.i18n import _, _LE, _LW, _LI
+from cinder.i18n import _
 from cinder import utils

 LOG = logging.getLogger(__name__)
@@ -80,7 +80,7 @@ class DotHillClient(object):
             return
         except exception.DotHillConnectionError:
             not_responding = self._curr_ip_addr
-            LOG.exception(_LE('session_login failed to connect to %s'),
+            LOG.exception('session_login failed to connect to %s',
                           self._curr_ip_addr)
             # Loop through the remaining management addresses
             # to find one that's up.
@@ -92,7 +92,7 @@ class DotHillClient(object):
                     self._get_session_key()
                     return
                 except exception.DotHillConnectionError:
-                    LOG.error(_LE('Failed to connect to %s'),
+                    LOG.error('Failed to connect to %s',
                               self._curr_ip_addr)
                     continue
         raise exception.DotHillConnectionError(
@@ -172,20 +172,20 @@ class DotHillClient(object):
                 return self._api_request(path, *args, **kargs)
             except exception.DotHillConnectionError as e:
                 if tries_left < 1:
-                    LOG.error(_LE("Array Connection error: "
-                                  "%s (no more retries)"), e.msg)
+                    LOG.error("Array Connection error: "
+                              "%s (no more retries)", e.msg)
                     raise
                 # Retry on any network connection errors, SSL errors, etc
-                LOG.error(_LE("Array Connection error: %s (retrying)"), e.msg)
+                LOG.error("Array Connection error: %s (retrying)", e.msg)
             except exception.DotHillRequestError as e:
                 if tries_left < 1:
-                    LOG.error(_LE("Array Request error: %s (no more retries)"),
+                    LOG.error("Array Request error: %s (no more retries)",
                               e.msg)
                     raise
                 # Retry specific errors which may succeed if we log in again
                 # -10027 => The user is not recognized on this system.
                 if '(-10027)' in e.msg:
-                    LOG.error(_LE("Array Request error: %s (retrying)"), e.msg)
+                    LOG.error("Array Request error: %s (retrying)", e.msg)
                 else:
                     raise

@@ -248,7 +248,7 @@ class DotHillClient(object):
             # -10186 => The specified name is already in use.
             # This can occur during controller failover.
             if '(-10186)' in e.msg:
-                LOG.warning(_LW("Ignoring error in create volume: %s"), e.msg)
+                LOG.warning("Ignoring error in create volume: %s", e.msg)
                 return None
             raise

@@ -261,8 +261,8 @@ class DotHillClient(object):
             # -10075 => The specified volume was not found.
             # This can occur during controller failover.
             if '(-10075)' in e.msg:
-                LOG.warning(_LW("Ignorning error while deleting %(volume)s:"
-                                " %(reason)s"),
+                LOG.warning("Ignorning error while deleting %(volume)s:"
+                            " %(reason)s",
                             {'volume': name, 'reason': e.msg})
                 return
             raise
@@ -277,8 +277,8 @@ class DotHillClient(object):
             # -10186 => The specified name is already in use.
             # This can occur during controller failover.
             if '(-10186)' in e.msg:
-                LOG.warning(_LW("Ignoring error attempting to create snapshot:"
-                                " %s"), e.msg)
+                LOG.warning("Ignoring error attempting to create snapshot:"
+                            " %s", e.msg)
                 return None

     def delete_snapshot(self, snap_name):
@@ -288,7 +288,7 @@ class DotHillClient(object):
             # -10050 => The volume was not found on this system.
             # This can occur during controller failover.
             if '(-10050)' in e.msg:
-                LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg)
+                LOG.warning("Ignoring unmap error -10050: %s", e.msg)
                 return None
             raise

@@ -381,8 +381,8 @@ class DotHillClient(object):
         except exception.DotHillRequestError as e:
             # -10058: The host identifier or nickname is already in use
             if '(-10058)' in e.msg:
-                LOG.error(_LE("While trying to create host nickname"
-                              " %(nickname)s: %(error_msg)s"),
+                LOG.error("While trying to create host nickname"
+                          " %(nickname)s: %(error_msg)s",
                           {'nickname': hostname,
                            'error_msg': e.msg})
             else:
@@ -400,9 +400,9 @@ class DotHillClient(object):
             except exception.DotHillRequestError as e:
                 # -3177 => "The specified LUN overlaps a previously defined LUN
                 if '(-3177)' in e.msg:
-                    LOG.info(_LI("Unable to map volume"
+                    LOG.info("Unable to map volume"
                              " %(volume_name)s to lun %(lun)d:"
-                             " %(reason)s"),
+                             " %(reason)s",
                              {'volume_name': volume_name,
                               'lun': lun, 'reason': e.msg})
                     lun = self._get_next_available_lun_for_host(host,
@@ -410,8 +410,8 @@ class DotHillClient(object):
                     continue
                 raise
             except Exception as e:
-                LOG.error(_LE("Error while mapping volume"
-                              " %(volume_name)s to lun %(lun)d:"),
+                LOG.error("Error while mapping volume"
+                          " %(volume_name)s to lun %(lun)d:",
                           {'volume_name': volume_name, 'lun': lun},
                           e)
                 raise
@@ -430,7 +430,7 @@ class DotHillClient(object):
             # -10050 => The volume was not found on this system.
             # This can occur during controller failover.
             if '(-10050)' in e.msg:
-                LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg)
+                LOG.warning("Ignoring unmap error -10050: %s", e.msg)
                 return None
             raise

@@ -481,7 +481,7 @@ class DotHillClient(object):
                     break
             else:
                 if count >= 5:
-                    LOG.error(_LE('Error in copying volume: %s'), src_name)
+                    LOG.error('Error in copying volume: %s', src_name)
                     raise exception.DotHillRequestError

                 time.sleep(1)
@@ -26,7 +26,7 @@ from oslo_config import cfg
 from oslo_log import log as logging

 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _
 from cinder.objects import fields
 from cinder.volume.drivers.dothill import dothill_client as dothill

@@ -176,7 +176,7 @@ class DotHillCommon(object):
                                       self.backend_name,
                                       self.backend_type)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Creation of volume %s failed."), volume['id'])
+            LOG.exception("Creation of volume %s failed.", volume['id'])
             raise exception.Invalid(ex)

         finally:
@@ -201,7 +201,7 @@ class DotHillCommon(object):
         """
         if (volume['status'] != "available" or
                 volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED):
-            LOG.error(_LE("Volume must be detached for clone operation."))
+            LOG.error("Volume must be detached for clone operation.")
             raise exception.VolumeAttached(volume_id=volume['id'])

     def create_cloned_volume(self, volume, src_vref):
@@ -223,7 +223,7 @@ class DotHillCommon(object):
             self.client.copy_volume(orig_name, dest_name,
                                     self.backend_name, self.backend_type)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Cloning of volume %s failed."),
+            LOG.exception("Cloning of volume %s failed.",
                           src_vref['id'])
             raise exception.Invalid(ex)
         finally:
@@ -246,7 +246,7 @@ class DotHillCommon(object):
             self.client.copy_volume(orig_name, dest_name,
                                     self.backend_name, self.backend_type)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Create volume failed from snapshot: %s"),
+            LOG.exception("Create volume failed from snapshot: %s",
                           snapshot['id'])
             raise exception.Invalid(ex)
         finally:
@@ -269,7 +269,7 @@ class DotHillCommon(object):
             # if the volume wasn't found, ignore the error
             if 'The volume was not found on this system.' in ex.args:
                 return
-            LOG.exception(_LE("Deletion of volume %s failed."), volume['id'])
+            LOG.exception("Deletion of volume %s failed.", volume['id'])
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
@@ -331,7 +331,7 @@ class DotHillCommon(object):
                                           connector_element)
             return data
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error mapping volume: %s"), volume_name)
+            LOG.exception("Error mapping volume: %s", volume_name)
             raise exception.Invalid(ex)

     def unmap_volume(self, volume, connector, connector_element):
@@ -347,7 +347,7 @@ class DotHillCommon(object):
                                      connector,
                                      connector_element)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error unmapping volume: %s"), volume_name)
+            LOG.exception("Error unmapping volume: %s", volume_name)
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
@@ -356,21 +356,21 @@ class DotHillCommon(object):
         try:
             return self.client.get_active_fc_target_ports()
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error getting active FC target ports."))
+            LOG.exception("Error getting active FC target ports.")
             raise exception.Invalid(ex)

     def get_active_iscsi_target_iqns(self):
         try:
             return self.client.get_active_iscsi_target_iqns()
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error getting active ISCSI target iqns."))
+            LOG.exception("Error getting active ISCSI target iqns.")
             raise exception.Invalid(ex)

     def get_active_iscsi_target_portals(self):
         try:
             return self.client.get_active_iscsi_target_portals()
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error getting active ISCSI target portals."))
+            LOG.exception("Error getting active ISCSI target portals.")
             raise exception.Invalid(ex)

     def create_snapshot(self, snapshot):
@@ -387,7 +387,7 @@ class DotHillCommon(object):
         try:
             self.client.create_snapshot(vol_name, snap_name)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Creation of snapshot failed for volume: %s"),
+            LOG.exception("Creation of snapshot failed for volume: %s",
                           snapshot['volume_id'])
             raise exception.Invalid(ex)
         finally:
@@ -404,7 +404,7 @@ class DotHillCommon(object):
             # if the volume wasn't found, ignore the error
             if 'The volume was not found on this system.' in ex.args:
                 return
-            LOG.exception(_LE("Deleting snapshot %s failed"), snapshot['id'])
+            LOG.exception("Deleting snapshot %s failed", snapshot['id'])
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
@@ -428,7 +428,7 @@ class DotHillCommon(object):
         try:
             self.client.extend_volume(volume_name, "%dGiB" % growth_size)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Extension of volume %s failed."), volume['id'])
+            LOG.exception("Extension of volume %s failed.", volume['id'])
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
@@ -437,14 +437,14 @@ class DotHillCommon(object):
         try:
             return self.client.get_chap_record(initiator_name)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error getting chap record."))
+            LOG.exception("Error getting chap record.")
             raise exception.Invalid(ex)

     def create_chap_record(self, initiator_name, chap_secret):
         try:
             self.client.create_chap_record(initiator_name, chap_secret)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error creating chap record."))
+            LOG.exception("Error creating chap record.")
             raise exception.Invalid(ex)

     def migrate_volume(self, volume, host):
@@ -489,7 +489,7 @@ class DotHillCommon(object):
             self.client.modify_volume_name(dest_name, source_name)
             return (True, None)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error migrating volume: %s"), source_name)
+            LOG.exception("Error migrating volume: %s", source_name)
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
@@ -512,7 +512,7 @@ class DotHillCommon(object):
             self.client.modify_volume_name(target_vol_name,
                                            modify_target_vol_name)
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error manage existing volume."))
+            LOG.exception("Error manage existing volume.")
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
@@ -530,7 +530,7 @@ class DotHillCommon(object):
             size = self.client.get_volume_size(target_vol_name)
             return size
         except exception.DotHillRequestError as ex:
-            LOG.exception(_LE("Error manage existing get volume size."))
+            LOG.exception("Error manage existing get volume size.")
             raise exception.Invalid(ex)
         finally:
             self.client_logout()
|
@ -37,7 +37,7 @@ from oslo_utils import units
|
|||||||
|
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LW, _LI, _LE
|
from cinder.i18n import _
|
||||||
from cinder import interface
|
from cinder import interface
|
||||||
from cinder.volume import driver
|
from cinder.volume import driver
|
||||||
|
|
||||||
@ -194,7 +194,7 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
|
|||||||
try:
|
try:
|
||||||
return fn(*args)
|
return fn(*args)
|
||||||
except dbus.DBusException as e:
|
except dbus.DBusException as e:
|
||||||
LOG.warning(_LW("Got disconnected; trying to reconnect. (%s)"), e)
|
LOG.warning("Got disconnected; trying to reconnect. (%s)", e)
|
||||||
self.dbus_connect()
|
self.dbus_connect()
|
||||||
# Old function object is invalid, get new one.
|
# Old function object is invalid, get new one.
|
||||||
return getattr(self.odm, fn._method_name)(*args)
|
return getattr(self.odm, fn._method_name)(*args)
|
||||||
@ -354,8 +354,8 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
|
|||||||
|
|
||||||
retry += 1
|
retry += 1
|
||||||
# Not yet
|
# Not yet
|
||||||
LOG.warning(_LW('Try #%(try)d: Volume "%(res)s"/%(vol)d '
|
LOG.warning('Try #%(try)d: Volume "%(res)s"/%(vol)d '
|
||||||
'not yet deployed on "%(host)s", waiting.'),
|
'not yet deployed on "%(host)s", waiting.',
|
||||||
{'try': retry, 'host': nodenames,
|
{'try': retry, 'host': nodenames,
|
||||||
'res': res_name, 'vol': vol_nr})
|
'res': res_name, 'vol': vol_nr})
|
||||||
|
|
||||||
@ -771,9 +771,9 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
|
|||||||
|
|
||||||
if not d_res_name:
|
if not d_res_name:
|
||||||
# resource already gone?
|
# resource already gone?
|
||||||
LOG.warning(_LW("snapshot: %s not found, "
|
LOG.warning("snapshot: %s not found, "
|
||||||
"skipping delete operation"), snapshot['id'])
|
"skipping delete operation", snapshot['id'])
|
||||||
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
|
LOG.info('Successfully deleted snapshot: %s', snapshot['id'])
|
||||||
return True
|
return True
|
||||||
|
|
||||||
res = self.call_or_reconnect(self.odm.remove_snapshot,
|
res = self.call_or_reconnect(self.odm.remove_snapshot,
|
||||||
@ -1035,7 +1035,7 @@ class DrbdManageDrbdDriver(DrbdManageBaseDriver):
|
|||||||
|
|
||||||
if len(data) < 1:
|
if len(data) < 1:
|
||||||
# already removed?!
|
# already removed?!
|
||||||
LOG.info(_LI('DRBD connection for %s already removed'),
|
LOG.info('DRBD connection for %s already removed',
|
||||||
volume['id'])
|
volume['id'])
|
||||||
elif len(data) == 1:
|
elif len(data) == 1:
|
||||||
__, __, props, __ = data[0]
|
__, __, props, __ = data[0]
|
||||||
@ -1062,7 +1062,7 @@ class DrbdManageDrbdDriver(DrbdManageBaseDriver):
|
|||||||
self._check_result(res, ignore=[dm_exc.DM_ENOENT])
|
self._check_result(res, ignore=[dm_exc.DM_ENOENT])
|
||||||
else:
|
else:
|
||||||
# more than one assignment?
|
# more than one assignment?
|
||||||
LOG.error(_LE("DRBDmanage: too many assignments returned."))
|
LOG.error("DRBDmanage: too many assignments returned.")
|
||||||
return
|
return
|
||||||
|
|
||||||
def remove_export(self, context, volume):
|
def remove_export(self, context, volume):
|
||||||
|
@@ -20,7 +20,7 @@ This driver requires FSS-8.00-8865 or later.
 from oslo_log import log as logging

 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _
 from cinder import interface
 import cinder.volume.driver
 from cinder.volume.drivers.falconstor import fss_common
@@ -71,8 +71,8 @@ class FSSFCDriver(fss_common.FalconstorBaseDriver,
     def validate_connector(self, connector):
         """Check connector for at least one enabled FC protocol."""
         if 'FC' == self._storage_protocol and 'wwpns' not in connector:
-            LOG.error(_LE('The connector does not contain the required '
-                          'information.'))
+            LOG.error('The connector does not contain the required '
+                      'information.')
             raise exception.InvalidConnectorException(missing='wwpns')

     @fczm_utils.add_fc_zone
Some files were not shown because too many files have changed in this diff.