Remove log translations in share and share_group 4/5

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

This is the 4/5 commit. The old commit will be abandoned:
https://review.openstack.org/#/c/447822/

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: Ia46e9dc4953c788274f5c9b763b2fed96c28d60e
Depends-On: I9fd264a443c634465b8548067f86ac14c1a51faa
Partial-Bug: #1674542
commit 059fae0ed5
parent ffe135a5b3
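The change is mechanical throughout: drop the marker call and pass the plain
string straight to the logger, keeping oslo.log's lazy %-interpolation by
passing format arguments to the logging call instead of pre-formatting the
string. A minimal sketch of the pattern (illustrative only, not a line from
this change; replica_id is a placeholder value):

    from oslo_log import log

    LOG = log.getLogger(__name__)
    replica_id = "fake-id"

    # Before: the message was wrapped in a translation marker.
    #   from manila.i18n import _LI
    #   LOG.info(_LI("Deleting replica %s."), replica_id)

    # After: a plain string; arguments are still passed to the logger so
    # interpolation only happens when INFO-level logging is enabled.
    LOG.info("Deleting replica %s.", replica_id)

Note that the _() marker is kept for user-facing exception messages, which
is why imports like "from manila.i18n import _" remain in place below.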
@@ -18,7 +18,7 @@ import copy
 from oslo_log import log

 from manila.common import constants
-from manila.i18n import _, _LI
+from manila.i18n import _
 from manila import utils

 LOG = log.getLogger(__name__)
@@ -493,8 +493,8 @@ class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):
                       remove_rules, share_instance, share_server):
         for rule in add_rules:
             LOG.info(
-                _LI("Applying access rule '%(rule)s' for share "
-                    "instance '%(instance)s'"),
+                "Applying access rule '%(rule)s' for share "
+                "instance '%(instance)s'",
                 {'rule': rule['id'], 'instance': share_instance['id']}
             )

@@ -511,8 +511,8 @@ class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):

         for rule in delete_rules:
             LOG.info(
-                _LI("Denying access rule '%(rule)s' from share "
-                    "instance '%(instance)s'"),
+                "Denying access rule '%(rule)s' from share "
+                "instance '%(instance)s'",
                 {'rule': rule['id'], 'instance': share_instance['id']}
             )

@@ -32,7 +32,7 @@ from manila.common import constants
 from manila.data import rpcapi as data_rpcapi
 from manila.db import base
 from manila import exception
-from manila.i18n import _, _LE, _LI, _LW
+from manila.i18n import _
 from manila import policy
 from manila import quota
 from manila.scheduler import rpcapi as scheduler_rpcapi
@@ -148,22 +148,22 @@ class API(base.Base):
             return (usages[name]['reserved'] + usages[name]['in_use'])

         if 'gigabytes' in overs:
-            LOG.warning(_LW("Quota exceeded for %(s_pid)s, "
-                            "tried to create "
-                            "%(s_size)sG share (%(d_consumed)dG of "
-                            "%(d_quota)dG already consumed)."), {
-                's_pid': context.project_id,
-                's_size': size,
-                'd_consumed': _consumed('gigabytes'),
-                'd_quota': quotas['gigabytes']})
+            LOG.warning("Quota exceeded for %(s_pid)s, "
+                        "tried to create "
+                        "%(s_size)sG share (%(d_consumed)dG of "
+                        "%(d_quota)dG already consumed).", {
+                's_pid': context.project_id,
+                's_size': size,
+                'd_consumed': _consumed('gigabytes'),
+                'd_quota': quotas['gigabytes']})
             raise exception.ShareSizeExceedsAvailableQuota()
         elif 'shares' in overs:
-            LOG.warning(_LW("Quota exceeded for %(s_pid)s, "
-                            "tried to create "
-                            "share (%(d_consumed)d shares "
-                            "already consumed)."), {
-                's_pid': context.project_id,
-                'd_consumed': _consumed('shares')})
+            LOG.warning("Quota exceeded for %(s_pid)s, "
+                        "tried to create "
+                        "share (%(d_consumed)d shares "
+                        "already consumed).", {
+                's_pid': context.project_id,
+                'd_consumed': _consumed('shares')})
             raise exception.ShareLimitExceeded(allowed=quotas['shares'])

         try:
@@ -524,7 +524,7 @@ class API(base.Base):
             msg = _("Cannot delete last active replica.")
             raise exception.ReplicationException(reason=msg)

-        LOG.info(_LI("Deleting replica %s."), id)
+        LOG.info("Deleting replica %s.", id)

         self.db.share_replica_update(
             context, share_replica['id'],
@@ -927,7 +927,7 @@ class API(base.Base):
         except Exception as e:
             reservations = None
             LOG.exception(
-                _LE("Failed to update quota for deleting share: %s"), e)
+                ("Failed to update quota for deleting share: %s"), e)

         for share_instance in share.instances:
             if share_instance['host']:
@@ -984,7 +984,7 @@ class API(base.Base):
         share_groups = self.db.share_group_get_all_by_share_server(
             context, server['id'])
         if share_groups:
-            LOG.error(_LE("share server '%(ssid)s' in use by share groups."),
+            LOG.error("share server '%(ssid)s' in use by share groups.",
                       {'ssid': server['id']})
             raise exception.ShareServerInUse(share_server_id=server['id'])

@@ -1020,18 +1020,18 @@ class API(base.Base):
             return (usages[name]['reserved'] + usages[name]['in_use'])

         if 'snapshot_gigabytes' in overs:
-            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
-                      "%(s_size)sG snapshot (%(d_consumed)dG of "
-                      "%(d_quota)dG already consumed).")
+            msg = ("Quota exceeded for %(s_pid)s, tried to create "
+                   "%(s_size)sG snapshot (%(d_consumed)dG of "
+                   "%(d_quota)dG already consumed).")
             LOG.warning(msg, {'s_pid': context.project_id,
                               's_size': size,
                               'd_consumed': _consumed('gigabytes'),
                               'd_quota': quotas['snapshot_gigabytes']})
             raise exception.SnapshotSizeExceedsAvailableQuota()
         elif 'snapshots' in overs:
-            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
-                      "snapshot (%(d_consumed)d snapshots "
-                      "already consumed).")
+            msg = ("Quota exceeded for %(s_pid)s, tried to create "
+                   "snapshot (%(d_consumed)d snapshots "
+                   "already consumed).")
             LOG.warning(msg, {'s_pid': context.project_id,
                               'd_consumed': _consumed('snapshots')})
             raise exception.SnapshotLimitExceeded(
@@ -1190,17 +1190,17 @@ class API(base.Base):
         if (new_share_network_id == share_instance['share_network_id'] and
                 new_share_type_id == share_instance['share_type_id'] and
                 dest_host == share_instance['host']):
-            msg = _LI("Destination host (%(dest_host)s), share network "
-                      "(%(dest_sn)s) or share type (%(dest_st)s) are the same "
-                      "as the current host's '%(src_host)s', '%(src_sn)s' and "
-                      "'%(src_st)s' respectively. Nothing to be done.") % {
-                'dest_host': dest_host,
-                'dest_sn': new_share_network_id,
-                'dest_st': new_share_type_id,
-                'src_host': share_instance['host'],
-                'src_sn': share_instance['share_network_id'],
-                'src_st': share_instance['share_type_id'],
-            }
+            msg = ("Destination host (%(dest_host)s), share network "
+                   "(%(dest_sn)s) or share type (%(dest_st)s) are the same "
+                   "as the current host's '%(src_host)s', '%(src_sn)s' and "
+                   "'%(src_st)s' respectively. Nothing to be done.") % {
+                'dest_host': dest_host,
+                'dest_sn': new_share_network_id,
+                'dest_st': new_share_type_id,
+                'src_host': share_instance['host'],
+                'src_sn': share_instance['share_network_id'],
+                'src_st': share_instance['share_type_id'],
+            }
             LOG.info(msg)
             self.db.share_update(
                 context, share['id'],
@@ -1304,8 +1304,8 @@ class API(base.Base):
         elif share['task_state'] == (
                 constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
             data_rpc = data_rpcapi.DataAPI()
-            LOG.info(_LI("Sending request to get share migration information"
-                         " of share %s.") % share['id'])
+            LOG.info("Sending request to get share migration information"
+                     " of share %s." % share['id'])

             services = self.db.service_get_all_by_topic(context, 'manila-data')

@@ -1404,8 +1404,8 @@ class API(base.Base):
                 constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):

             data_rpc = data_rpcapi.DataAPI()
-            LOG.info(_LI("Sending request to cancel migration of "
-                         "share %s.") % share['id'])
+            LOG.info("Sending request to cancel migration of "
+                     "share %s." % share['id'])

             services = self.db.service_get_all_by_topic(context, 'manila-data')

@@ -1796,9 +1796,9 @@ class API(base.Base):
         def _consumed(name):
             return usages[name]['reserved'] + usages[name]['in_use']

-        msg = _LE("Quota exceeded for %(s_pid)s, tried to extend share "
-                  "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
-                  "already consumed).")
+        msg = ("Quota exceeded for %(s_pid)s, tried to extend share "
+               "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
+               "already consumed).")
         LOG.error(msg, {'s_pid': context.project_id,
                         's_size': size_increase,
                         'd_consumed': _consumed('gigabytes'),
@@ -1810,7 +1810,7 @@ class API(base.Base):

         self.update(context, share, {'status': constants.STATUS_EXTENDING})
         self.share_rpcapi.extend_share(context, share, new_size, reservations)
-        LOG.info(_LI("Extend share request issued successfully."),
+        LOG.info("Extend share request issued successfully.",
                  resource=share)

     def shrink(self, context, share, new_size):
@@ -1843,9 +1843,9 @@ class API(base.Base):

         self.update(context, share, {'status': constants.STATUS_SHRINKING})
         self.share_rpcapi.shrink_share(context, share, new_size)
-        LOG.info(_LI("Shrink share (id=%(id)s) request issued successfully."
-                     " New size: %(size)s") % {'id': share['id'],
-                                               'size': new_size})
+        LOG.info("Shrink share (id=%(id)s) request issued successfully."
+                 " New size: %(size)s" % {'id': share['id'],
+                                          'size': new_size})

     def snapshot_allow_access(self, context, snapshot, access_type, access_to):
         """Allow access to a share snapshot."""

@@ -25,7 +25,7 @@ from oslo_config import cfg
 from oslo_log import log

 from manila import exception
-from manila.i18n import _, _LE, _LW
+from manila.i18n import _
 from manila import network
 from manila import utils

@@ -213,8 +213,8 @@ class ExecuteMixin(object):
                 tries += 1
                 if tries >= self.configuration.num_shell_tries:
                     raise
-                LOG.exception(_LE("Recovering from a failed execute. "
-                                  "Try number %s"), tries)
+                LOG.exception("Recovering from a failed execute. "
+                              "Try number %s", tries)
                 time.sleep(tries ** 2)


@@ -1295,8 +1295,8 @@ class ShareDriver(object):
                 self.delete_snapshot(
                     context, share_snapshot, share_server=share_server)
             except exception.ManilaException:
-                msg = _LE('Could not delete share group snapshot member %(snap)s '
-                          'for share %(share)s.')
+                msg = ('Could not delete share group snapshot member %(snap)s '
+                       'for share %(share)s.')
                 LOG.error(msg % {
                     'snap': share_snapshot['id'],
                     'share': share_snapshot['share_id'],
@@ -1361,7 +1361,7 @@ class ShareDriver(object):
             raise exception.ShareGroupSnapshotNotSupported(
                 share_group=snap_dict['share_group_id'])
         elif not snapshot_members:
-            LOG.warning(_LW('No shares in share group to create snapshot.'))
+            LOG.warning('No shares in share group to create snapshot.')
             return None, None
         else:
             share_snapshots = []
@@ -1386,9 +1386,9 @@ class ShareDriver(object):
                     snapshot_members_updates.append(member_update)
                     share_snapshots.append(share_snapshot)
                 except exception.ManilaException as e:
-                    msg = _LE('Could not create share group snapshot. Failed '
-                              'to create share snapshot %(snap)s for '
-                              'share %(share)s.')
+                    msg = ('Could not create share group snapshot. Failed '
+                           'to create share snapshot %(snap)s for '
+                           'share %(share)s.')
                     LOG.exception(msg % {
                         'snap': share_snapshot['id'],
                         'share': share_snapshot['share_id']

@@ -20,7 +20,7 @@ from oslo_utils import units

 from manila.common import constants
 from manila import exception
-from manila.i18n import _, _LI, _LW
+from manila.i18n import _
 from manila.share import driver
 from manila.share import share_types

@@ -142,7 +142,7 @@ class CephFSNativeDriver(driver.ShareDriver,):
             auth_id = self.configuration.safe_get('cephfs_auth_id')
             self._volume_client = ceph_volume_client.CephFSVolumeClient(
                 auth_id, conf_path, cluster_name)
-            LOG.info(_LI("[%(be)s}] Ceph client found, connecting..."),
+            LOG.info("[%(be)s}] Ceph client found, connecting...",
                      {"be": self.backend_name})
             if auth_id != CEPH_DEFAULT_AUTH_ID:
                 # Evict any other manila sessions. Only do this if we're
@@ -157,7 +157,7 @@ class CephFSNativeDriver(driver.ShareDriver,):
                 self._volume_client = None
                 raise
             else:
-                LOG.info(_LI("[%(be)s] Ceph client connection complete."),
+                LOG.info("[%(be)s] Ceph client connection complete.",
                          {"be": self.backend_name})

         return self._volume_client
@@ -199,7 +199,7 @@ class CephFSNativeDriver(driver.ShareDriver,):
             addrs=",".join(mon_addrs),
             path=volume['mount_path'])

-        LOG.info(_LI("Calculated export location for share %(id)s: %(loc)s"),
+        LOG.info("Calculated export location for share %(id)s: %(loc)s",
                  {"id": share['id'], "loc": export_location})

         return {
@@ -244,8 +244,8 @@ class CephFSNativeDriver(driver.ShareDriver,):

     def _deny_access(self, context, share, access, share_server=None):
         if access['access_type'] != CEPHX_ACCESS_TYPE:
-            LOG.warning(_LW("Invalid access type '%(type)s', "
-                            "ignoring in deny."),
+            LOG.warning("Invalid access type '%(type)s', "
+                        "ignoring in deny.",
                         {"type": access['access_type']})
             return

@@ -18,7 +18,7 @@ import uuid
 from oslo_log import log

 from manila import exception
-from manila.i18n import _, _LE, _LI
+from manila.i18n import _
 from manila.share import driver


@@ -66,8 +66,8 @@ class DockerExecHelper(driver.ExecuteMixin):
         if result[1] != "":
             raise exception.ManilaException(
                 _("Container %s has failed to start.") % name)
-        LOG.info(_LI("A container has been successfully started! Its id is "
-                     "%s."), result[0].rstrip('\n'))
+        LOG.info("A container has been successfully started! Its id is "
+                 "%s.", result[0].rstrip('\n'))

     def stop_container(self, name):
         LOG.debug("Stopping container %s.", name)
@@ -76,7 +76,7 @@ class DockerExecHelper(driver.ExecuteMixin):
         if result[1] != '':
             raise exception.ManilaException(
                 _("Container %s has failed to stop properly.") % name)
-        LOG.info(_LI("Container %s is successfully stopped."), name)
+        LOG.info("Container %s is successfully stopped.", name)

     def execute(self, name=None, cmd=None):
         if name is None:
@@ -94,7 +94,7 @@ class DockerExecHelper(driver.ExecuteMixin):
         try:
             result = self._execute(*cmd, run_as_root=True)
         except Exception:
-            LOG.exception(_LE("Executing command failed."))
+            LOG.exception("Executing command failed.")
             return None
         LOG.debug("Execution result: %s.", result)
         return result

@@ -27,7 +27,7 @@ from oslo_log import log
 from oslo_utils import importutils

 from manila import exception
-from manila.i18n import _, _LI, _LW
+from manila.i18n import _
 from manila.share import driver
 from manila import utils

@@ -156,9 +156,9 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
                 ["rm", "-fR", "/shares/%s" % share.share_id]
             )
         except exception.ProcessExecutionError as e:
-            LOG.warning(_LW("Failed to remove /shares/%(share)s directory in "
-                            "container %(cont)s."), {"share": share.share_id,
-                                                     "cont": server_id})
+            LOG.warning("Failed to remove /shares/%(share)s directory in "
+                        "container %(cont)s.", {"share": share.share_id,
+                                                "cont": server_id})
             LOG.error(e)

         self.storage.remove_storage(share)
@@ -213,8 +213,8 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
                    "specified.") % neutron_class
             raise exception.ManilaException(msg)
         elif host_id is None:
-            LOG.warning(_LW("neutron_host_id is not specified. This driver "
-                            "might not work as expected without it."))
+            LOG.warning("neutron_host_id is not specified. This driver "
+                        "might not work as expected without it.")

     def _connect_to_network(self, server_id, network_info, host_veth):
         LOG.debug("Attempting to connect container to neutron network.")
@@ -280,8 +280,8 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
                           self.configuration.container_ovs_bridge_name,
                           veth, run_as_root=True)
         except exception.ProcessExecutionError as e:
-            LOG.warning(_LW("Failed to delete port %s: port "
-                            "vanished."), veth)
+            LOG.warning("Failed to delete port %s: port "
+                        "vanished.", veth)
             LOG.error(e)

     def _get_veth_state(self):
@@ -317,5 +317,5 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):

         veth = self._get_corresponding_veth(veths_before, veths_after)
         self._connect_to_network(server_id, network_info, veth)
-        LOG.info(_LI("Container %s was created."), server_id)
+        LOG.info("Container %s was created.", server_id)
         return {"id": network_info["server_id"]}

@@ -17,7 +17,7 @@ from oslo_log import log

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LW
+from manila.i18n import _

 LOG = log.getLogger(__name__)

@@ -110,7 +110,7 @@ class DockerCIFSHelper(object):
             existing_users = self._get_existing_users(server_id, share_name,
                                                       access)
         except TypeError:
-            LOG.warning(_LW("Can't access smbd at share %s.") % share_name)
+            LOG.warning("Can't access smbd at share %s." % share_name)
             return
         else:
             allowed_users = " ".join(sorted(set(existing_users.split()) -

@@ -20,7 +20,7 @@ from oslo_config import cfg
 from oslo_log import log

 from manila import exception
-from manila.i18n import _, _LW
+from manila.i18n import _
 from manila.share import driver


@@ -85,7 +85,7 @@ class LVMHelper(driver.ExecuteMixin):
         try:
             self._execute("umount", to_remove, run_as_root=True)
         except exception.ProcessExecutionError as e:
-            LOG.warning(_LW("Failed to umount helper directory %s."),
+            LOG.warning("Failed to umount helper directory %s.",
                         to_remove)
             LOG.error(e)
         # (aovchinnikov): bug 1621784 manifests itself in jamming logical
@@ -94,7 +94,7 @@ class LVMHelper(driver.ExecuteMixin):
             self._execute("lvremove", "-f", "--autobackup", "n",
                           to_remove, run_as_root=True)
         except exception.ProcessExecutionError as e:
-            LOG.warning(_LW("Failed to remove logical volume %s.") % to_remove)
+            LOG.warning("Failed to remove logical volume %s." % to_remove)
             LOG.error(e)

     def extend_share(self, share, new_size, share_server=None):

@@ -24,7 +24,7 @@ import six

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LW
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins import base
 from manila.share.drivers.dell_emc.plugins.isilon import isilon_api

@@ -139,8 +139,8 @@ class IsilonStorageConnection(base.StorageConnection):
             self._root_dir + '/' + share['name'])

         if share_id is None:
-            lw = _LW('Attempted to delete NFS Share "%s", but the share does '
-                     'not appear to exist.')
+            lw = ('Attempted to delete NFS Share "%s", but the share does '
+                  'not appear to exist.')
             LOG.warning(lw, share['name'])
         else:
             # attempt to delete the share
@@ -154,8 +154,8 @@ class IsilonStorageConnection(base.StorageConnection):
         """Is called to remove CIFS share."""
         smb_share = self._isilon_api.lookup_smb_share(share['name'])
         if smb_share is None:
-            lw = _LW('Attempted to delete CIFS Share "%s", but the share does '
-                     'not appear to exist.')
+            lw = ('Attempted to delete CIFS Share "%s", but the share does '
+                  'not appear to exist.')
             LOG.warning(lw, share['name'])
         else:
             share_deleted = self._isilon_api.delete_smb_share(share['name'])

@@ -25,7 +25,7 @@ if storops:

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LI, _LE, _LW
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins.unity import utils

 LOG = log.getLogger(__name__)
@@ -34,7 +34,7 @@ LOG = log.getLogger(__name__)
 class UnityClient(object):
     def __init__(self, host, username, password):
         if storops is None:
-            LOG.error(_LE('StorOps is required to run EMC Unity driver.'))
+            LOG.error('StorOps is required to run EMC Unity driver.')
         self.system = storops.UnitySystem(host, username, password)

     def create_cifs_share(self, resource, share_name):
@@ -51,7 +51,7 @@ class UnityClient(object):
                 # based share. Log the internal error if it happens.
                 share.enable_ace()
             except storops_ex.UnityException:
-                msg = _LE('Failed to enabled ACE for share: {}.')
+                msg = ('Failed to enabled ACE for share: {}.')
                 LOG.exception(msg.format(share_name))
             return share
         except storops_ex.UnitySmbShareNameExistedError:
@@ -116,22 +116,22 @@ class UnityClient(object):
         try:
             filesystem.delete()
         except storops_ex.UnityResourceNotFoundError:
-            LOG.info(_LI('Filesystem %s is already removed.'), filesystem.name)
+            LOG.info('Filesystem %s is already removed.', filesystem.name)

     def create_nas_server(self, name, sp, pool, tenant=None):
         try:
             return self.system.create_nas_server(name, sp, pool,
                                                  tenant=tenant)
         except storops_ex.UnityNasServerNameUsedError:
-            LOG.info(_LI('Share server %s already exists, ignoring share '
-                         'server creation.'), name)
+            LOG.info('Share server %s already exists, ignoring share '
+                     'server creation.', name)
             return self.get_nas_server(name)

     def get_nas_server(self, name):
         try:
             return self.system.get_nas_server(name=name)
         except storops_ex.UnityResourceNotFoundError:
-            LOG.info(_LI('NAS server %s not found.'), name)
+            LOG.info('NAS server %s not found.', name)
             raise

     def delete_nas_server(self, name, username=None, password=None):
@@ -141,7 +141,7 @@ class UnityClient(object):
             tenant = nas_server.tenant
             nas_server.delete(username=username, password=password)
         except storops_ex.UnityResourceNotFoundError:
-            LOG.info(_LI('NAS server %s not found.'), name)
+            LOG.info('NAS server %s not found.', name)

         if tenant is not None:
             self._delete_tenant(tenant)
@@ -156,8 +156,8 @@ class UnityClient(object):
         try:
             tenant.delete(delete_hosts=True)
         except storops_ex.UnityException as ex:
-            LOG.warning(_LW('Delete tenant %(tenant)s failed with error: '
-                            '%(ex)s. Leave the tenant on the system.'),
+            LOG.warning('Delete tenant %(tenant)s failed with error: '
+                        '%(ex)s. Leave the tenant on the system.',
                         {'tenant': tenant.get_id(),
                          'ex': ex})

@@ -166,8 +166,8 @@ class UnityClient(object):
         try:
             nas_server.create_dns_server(domain, dns_ip)
         except storops_ex.UnityOneDnsPerNasServerError:
-            LOG.info(_LI('DNS server %s already exists, '
-                         'ignoring DNS server creation.'), domain)
+            LOG.info('DNS server %s already exists, '
+                     'ignoring DNS server creation.', domain)

     @staticmethod
     def create_interface(nas_server, ip_addr, netmask, gateway, port_id,
@@ -190,16 +190,16 @@ class UnityClient(object):
                 domain_username=username,
                 domain_password=password)
         except storops_ex.UnitySmbNameInUseError:
-            LOG.info(_LI('CIFS service on NAS server %s is '
-                         'already enabled.'), nas_server.name)
+            LOG.info('CIFS service on NAS server %s is '
+                     'already enabled.', nas_server.name)

     @staticmethod
     def enable_nfs_service(nas_server):
         try:
             nas_server.enable_nfs_service()
         except storops_ex.UnityNfsAlreadyEnabledError:
-            LOG.info(_LI('NFS service on NAS server %s is '
-                         'already enabled.'), nas_server.name)
+            LOG.info('NFS service on NAS server %s is '
+                     'already enabled.', nas_server.name)

     @staticmethod
     def create_snapshot(filesystem, name):
@@ -207,8 +207,8 @@ class UnityClient(object):
         try:
             return filesystem.create_snap(name, fs_access_type=access_type)
         except storops_ex.UnitySnapNameInUseError:
-            LOG.info(_LI('Snapshot %(snap)s on Filesystem %(fs)s already '
-                         'exists.'), {'snap': name, 'fs': filesystem.name})
+            LOG.info('Snapshot %(snap)s on Filesystem %(fs)s already '
+                     'exists.', {'snap': name, 'fs': filesystem.name})

     def create_snap_of_snap(self, src_snap, dst_snap_name, snap_type):
         access_type = enums.FilesystemSnapAccessTypeEnum.PROTOCOL
@@ -233,7 +233,7 @@ class UnityClient(object):
         try:
             snap.delete()
         except storops_ex.UnityResourceNotFoundError:
-            LOG.info(_LI('Snapshot %s is already removed.'), snap.name)
+            LOG.info('Snapshot %s is already removed.', snap.name)

     def get_pool(self, name=None):
         return self.system.get_pool(name=name)
@@ -283,7 +283,7 @@ class UnityClient(object):
         try:
             share.delete_access(host_ip)
         except storops_ex.UnityHostNotFoundException:
-            LOG.info(_LI('%(host)s access to %(share)s is already removed.'),
+            LOG.info('%(host)s access to %(share)s is already removed.',
                      {'host': host_ip, 'share': share_name})

     def get_file_ports(self):
@@ -328,6 +328,6 @@ class UnityClient(object):
                      "Use the existing VLAN tenant.", vlan_id)
             exc.reraise = False
         except storops_ex.SystemAPINotSupported:
-            LOG.info(_LI("This system doesn't support tenant."))
+            LOG.info("This system doesn't support tenant.")

         return tenant

@@ -27,7 +27,7 @@ if storops:

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LE, _LW, _LI
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins import base as driver
 from manila.share.drivers.dell_emc.plugins.unity import client
 from manila.share.drivers.dell_emc.plugins.unity import utils as unity_utils
@@ -127,9 +127,9 @@ class UnityStorageConnection(driver.StorageConnection):
             raise exception.BadConfigurationException(reason=msg)

         if unmanaged_port_ids:
-            LOG.info(_LI("The following specified ports are not managed by "
-                         "the backend: %(unmanaged)s. This host will only "
-                         "manage the storage ports: %(exist)s"),
+            LOG.info("The following specified ports are not managed by "
+                     "the backend: %(unmanaged)s. This host will only "
+                     "manage the storage ports: %(exist)s",
                      {'unmanaged': ",".join(unmanaged_port_ids),
                       'exist': ",".join(map(",".join,
                                             sp_ports_map.values()))})
@@ -138,8 +138,8 @@ class UnityStorageConnection(driver.StorageConnection):
                       ",".join(map(",".join, sp_ports_map.values())))

         if len(sp_ports_map) == 1:
-            LOG.info(_LI("Only ports of %s are configured. Configure ports "
-                         "of both SPA and SPB to use both of the SPs."),
+            LOG.info("Only ports of %s are configured. Configure ports "
+                     "of both SPA and SPB to use both of the SPs.",
                      list(sp_ports_map)[0])

         return sp_ports_map
@@ -237,7 +237,7 @@ class UnityStorageConnection(driver.StorageConnection):
             backend_share = self.client.get_share(share_name,
                                                   share['share_proto'])
         except storops_ex.UnityResourceNotFoundError:
-            LOG.warning(_LW("Share %s is not found when deleting the share"),
+            LOG.warning("Share %s is not found when deleting the share",
                         share_name)
             return

@@ -261,8 +261,8 @@ class UnityStorageConnection(driver.StorageConnection):
                                       new_size)
         else:
             share_id = share['id']
-            reason = _LE("Driver does not support extending a "
-                         "snapshot based share.")
+            reason = ("Driver does not support extending a "
+                      "snapshot based share.")
             raise exception.ShareExtendingError(share_id=share_id,
                                                 reason=reason)

@@ -422,7 +422,7 @@ class UnityStorageConnection(driver.StorageConnection):

         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Could not setup server.'))
+                LOG.exception('Could not setup server.')
                 server_details = {'share_server_name': server_name}
                 self.teardown_server(
                     server_details, network_info['security_services'])
@@ -538,10 +538,10 @@ class UnityStorageConnection(driver.StorageConnection):
             raise exception.BadConfigurationException(reason=msg)

         if unmanaged_pools:
-            LOG.info(_LI("The following specified storage pools "
-                         "are not managed by the backend: "
-                         "%(un_managed)s. This host will only manage "
-                         "the storage pools: %(exist)s"),
+            LOG.info("The following specified storage pools "
+                     "are not managed by the backend: "
+                     "%(un_managed)s. This host will only manage "
+                     "the storage pools: %(exist)s",
                      {'un_managed': ",".join(unmanaged_pools),
                       'exist': ",".join(matched_pools)})
         else:
@@ -621,13 +621,13 @@ class UnityStorageConnection(driver.StorageConnection):
                 # Enable NFS service with kerberos
                 kerberos_enabled = True
                 # TODO(jay.xu): enable nfs service with kerberos
-                LOG.warning(_LW('Kerberos is not supported by '
-                                'EMC Unity manila driver plugin.'))
+                LOG.warning('Kerberos is not supported by '
+                            'EMC Unity manila driver plugin.')
             elif service_type == 'ldap':
-                LOG.warning(_LW('LDAP is not supported by '
-                                'EMC Unity manila driver plugin.'))
+                LOG.warning('LDAP is not supported by '
+                            'EMC Unity manila driver plugin.')
             else:
-                LOG.warning(_LW('Unknown security service type: %s.'),
+                LOG.warning('Unknown security service type: %s.',
                             service_type)

         if not kerberos_enabled:

@@ -24,7 +24,7 @@ from oslo_utils import units

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LE, _LI, _LW
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins import base as driver
 from manila.share.drivers.dell_emc.plugins.vmax import (
     object_manager as manager)
@@ -250,8 +250,8 @@ class VMAXStorageConnection(driver.StorageConnection):
     def delete_share(self, context, share, share_server=None):
         """Delete a share."""
         if share_server is None:
-            LOG.warning(_LW("Share network should be specified for "
-                            "share deletion."))
+            LOG.warning("Share network should be specified for "
+                        "share deletion.")
             return

         share_proto = share['share_proto'].upper()
@@ -295,20 +295,20 @@ class VMAXStorageConnection(driver.StorageConnection):
             # Delete mount point
             self._get_context('MountPoint').delete(path, vdm_name)
         except exception.EMCVmaxXMLAPIError as e:
-            LOG.exception(_LE("CIFS server %(name)s on mover %(mover_name)s "
+            LOG.exception("CIFS server %(name)s on mover %(mover_name)s "
                           "not found due to error %(err)s. Skip the "
-                          "deletion."),
+                          "deletion.",
                           {'name': path, 'mover_name': vdm_name,
-                          'err': e.message})
+                           'err': e.message})

         try:
             # Delete file system
             self._get_context('FileSystem').delete(share_name)
         except exception.EMCVmaxXMLAPIError as e:
-            LOG.exception(_LE("File system %(share_name)s not found due to"
-                          "error %(err)s. Skip the deletion."),
+            LOG.exception("File system %(share_name)s not found due to"
+                          "error %(err)s. Skip the deletion.",
                           {'share_name': share_name,
-                          'err': e.message})
+                           'err': e.message})

     def delete_snapshot(self, context, snapshot, share_server=None):
         """Delete a snapshot."""
@@ -471,7 +471,7 @@ class VMAXStorageConnection(driver.StorageConnection):
         share_name = share['id']

         if access['access_type'] != 'user':
-            LOG.warning(_LW("Only user access type allowed for CIFS share."))
+            LOG.warning("Only user access type allowed for CIFS share.")
             return

         user_name = access['access_to']
@@ -505,7 +505,7 @@ class VMAXStorageConnection(driver.StorageConnection):

         access_type = access['access_type']
         if access_type != 'ip':
-            LOG.warning(_LW("Only ip access type allowed."))
+            LOG.warning("Only ip access type allowed.")
             return

         host_ip = access['access_to']
@@ -550,7 +550,7 @@ class VMAXStorageConnection(driver.StorageConnection):
                        ",".join(real_pools))
                 raise exception.InvalidParameterValue(err=msg)

-            LOG.info(_LI("Storage pools: %s will be managed."),
+            LOG.info("Storage pools: %s will be managed.",
                      ",".join(matched_pools))
         else:
             LOG.debug("No storage pool is specified, so all pools "
@@ -722,7 +722,7 @@ class VMAXStorageConnection(driver.StorageConnection):

         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Could not setup server'))
+                LOG.exception('Could not setup server')
                 server_details = self._construct_backend_details(
                     vdm_name, allocated_interfaces)
                 self.teardown_server(
@@ -810,7 +810,7 @@ class VMAXStorageConnection(driver.StorageConnection):
         status, servers = self._get_context('CIFSServer').get_all(
             vdm_name)
         if constants.STATUS_OK != status:
-            LOG.error(_LE('Could not find CIFS server by name: %s.'),
+            LOG.error('Could not find CIFS server by name: %s.',
                       vdm_name)
         else:
             cifs_servers = copy.deepcopy(servers)

@@ -25,7 +25,6 @@ from six.moves.urllib import request as url_request  # pylint: disable=E0611

 from manila import exception
 from manila.i18n import _
-from manila.i18n import _LE
 from manila.share.drivers.dell_emc.plugins.vmax import constants
 from manila import utils

@@ -154,8 +153,8 @@ class SSHConnector(object):
             return out, err
         except processutils.ProcessExecutionError as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error running SSH command: %(cmd)s. '
-                              'Error: %(excmsg)s.'),
+                LOG.error('Error running SSH command: %(cmd)s. '
+                          'Error: %(excmsg)s.',
                           {'cmd': command, 'excmsg': e})

     def log_request(self, cmd, out, err):

@@ -24,7 +24,7 @@ import six

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LI, _LW, _LE
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins.vmax import connector
 from manila.share.drivers.dell_emc.plugins.vmax import constants
 from manila.share.drivers.dell_emc.plugins.vmax import utils as vmax_utils
@@ -89,8 +89,8 @@ class StorageObject(object):
                 constants.STATUS_INFO):
             response['maxSeverity'] = constants.STATUS_OK

-            LOG.warning(_LW("Translated status from %(old)s to %(new)s. "
-                            "Message: %(info)s."),
+            LOG.warning("Translated status from %(old)s to %(new)s. "
+                        "Message: %(info)s.",
                         {'old': old_Severity,
                          'new': response['maxSeverity'],
                          'info': response})
@@ -252,8 +252,8 @@ class FileSystem(StorageObject):
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif self._response_validation(
                 response, constants.MSG_FILESYSTEM_EXIST):
-            LOG.warning(_LW("File system %s already exists. "
-                            "Skip the creation."), name)
+            LOG.warning("File system %s already exists. "
+                        "Skip the creation.", name)
             return
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to create file system %(name)s. "
@@ -306,7 +306,7 @@ class FileSystem(StorageObject):
     def delete(self, name):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("File system %s not found. Skip the deletion."),
+            LOG.warning("File system %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -440,8 +440,8 @@ class FileSystem(StorageObject):
         try:
             self._execute_cmd(copy_ckpt_cmd, check_exit_code=True)
         except processutils.ProcessExecutionError as expt:
-            LOG.error(_LE("Failed to copy content from snapshot %(snap)s to "
-                          "file system %(filesystem)s. Reason: %(err)s."),
+            LOG.error("Failed to copy content from snapshot %(snap)s to "
+                      "file system %(filesystem)s. Reason: %(err)s.",
                       {'snap': snap_name,
                        'filesystem': name,
                        'err': expt})
@@ -576,8 +576,8 @@ class MountPoint(StorageObject):
             self.xml_retry = True
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif self._is_mount_point_already_existent(response):
-            LOG.warning(_LW("Mount Point %(mount)s already exists. "
-                            "Skip the creation."), {'mount': mount_path})
+            LOG.warning("Mount Point %(mount)s already exists. "
+                        "Skip the creation.", {'mount': mount_path})
             return
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_('Failed to create Mount Point %(mount)s for '
@@ -642,8 +642,8 @@ class MountPoint(StorageObject):
             self.xml_retry = True
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif self._is_mount_point_nonexistent(response):
-            LOG.warning(_LW('Mount point %(mount)s on mover %(mover_name)s '
-                            'not found.'),
+            LOG.warning('Mount point %(mount)s on mover %(mover_name)s '
+                        'not found.',
                         {'mount': mount_path, 'mover_name': mover_name})

             return
@@ -817,8 +817,8 @@ class Mover(StorageObject):
         lines = out.strip().split('\n')
         for line in lines:
             if line.strip().split() == header:
-                LOG.info(_LI('Found the header of the command '
-                             '/nas/bin/nas_cel -interconnect -l.'))
+                LOG.info('Found the header of the command '
+                         '/nas/bin/nas_cel -interconnect -l.')
             else:
                 interconn = line.strip().split()
                 if interconn[2] == source and interconn[4] == destination:
@@ -874,7 +874,7 @@ class VDM(StorageObject):
             self.xml_retry = True
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif self._response_validation(response, constants.MSG_VDM_EXIST):
-            LOG.warning(_LW("VDM %(name)s already exists. Skip the creation."),
+            LOG.warning("VDM %(name)s already exists. Skip the creation.",
                         {'name': name})
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to create VDM %(name)s on mover "
@@ -918,7 +918,7 @@ class VDM(StorageObject):
     def delete(self, name):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("VDM %s not found. Skip the deletion."),
+            LOG.warning("VDM %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -1053,8 +1053,8 @@ class Snapshot(StorageObject):
         response = self._send_request(request)

         if self._response_validation(response, constants.MSG_SNAP_EXIST):
-            LOG.warning(_LW("Snapshot %(name)s already exists. "
-                            "Skip the creation."),
+            LOG.warning("Snapshot %(name)s already exists. "
+                        "Skip the creation.",
                         {'name': name})
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to create snapshot %(name)s on "
@@ -1098,7 +1098,7 @@ class Snapshot(StorageObject):
     def delete(self, name):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("Snapshot %s not found. Skip the deletion."),
+            LOG.warning("Snapshot %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -1182,12 +1182,12 @@ class MoverInterface(StorageObject):
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_NAME_EXIST):
-            LOG.warning(_LW("Mover interface name %s already exists. "
-                            "Skip the creation."), name)
+            LOG.warning("Mover interface name %s already exists. "
+                        "Skip the creation.", name)
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_EXIST):
-            LOG.warning(_LW("Mover interface IP %s already exists. "
-                            "Skip the creation."), ip_addr)
+            LOG.warning("Mover interface IP %s already exists. "
+                        "Skip the creation.", ip_addr)
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_INVALID_VLAN_ID):
             # When fail to create a mover interface with the specified
@@ -1246,8 +1246,8 @@ class MoverInterface(StorageObject):
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_NON_EXISTENT):
-            LOG.warning(_LW("Mover interface %s not found. "
-                            "Skip the deletion."), ip_addr)
+            LOG.warning("Mover interface %s not found. "
+                        "Skip the deletion.", ip_addr)
             return
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to delete mover interface %(ip)s on mover "
@@ -1316,8 +1316,8 @@ class DNSDomain(StorageObject):
             self.xml_retry = True
             raise exception.EMCVmaxInvalidMoverID(id=mover_id)
         elif constants.STATUS_OK != response['maxSeverity']:
-            LOG.warning(_LW("Failed to delete DNS domain %(name)s. "
-                            "Reason: %(err)s."),
+            LOG.warning("Failed to delete DNS domain %(name)s. "
+                        "Reason: %(err)s.",
                         {'name': name, 'err': response['problems']})


@@ -1508,13 +1508,13 @@ class CIFSServer(StorageObject):
             status, out = self.get(
                 computer_name.lower(), mover_name, is_vdm, self.xml_retry)
             if constants.STATUS_NOT_FOUND == status:
-                LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s "
-                                "not found. Skip the deletion."),
+                LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
+                            "not found. Skip the deletion.",
                             {'name': computer_name, 'mover_name': mover_name})
                 return
         except exception.EMCVmaxXMLAPIError:
-            LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s "
-                            "not found. Skip the deletion."),
+            LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
+                        "not found. Skip the deletion.",
                         {'name': computer_name, 'mover_name': mover_name})
             return

@@ -1606,7 +1606,7 @@ class CIFSShare(StorageObject):
     def delete(self, name, mover_name, is_vdm=True):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("CIFS share %s not found. Skip the deletion."),
+            LOG.warning("CIFS share %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -1684,8 +1684,8 @@ class CIFSShare(StorageObject):
             dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' %
                                  {'domain': domain, 'user': user_name}, re.I)
             if re.search(dup_msg, expt.stdout):
-                LOG.warning(_LW("Duplicate access control entry, "
-                                "skipping allow..."))
+                LOG.warning("Duplicate access control entry, "
+                            "skipping allow...")
             else:
                 message = (_('Failed to allow the access %(access)s to '
                              'CIFS share %(name)s. Reason: %(err)s.') %
@@ -1716,10 +1716,10 @@ class CIFSShare(StorageObject):
                                % {'domain': domain, 'user': user_name}, re.I)

             if re.search(not_found_msg, expt.stdout):
-                LOG.warning(_LW("No access control entry found, "
-                                "skipping deny..."))
+                LOG.warning("No access control entry found, "
+                            "skipping deny...")
             elif re.search(user_err_msg, expt.stdout):
-                LOG.warning(_LW("User not found on domain, skipping deny..."))
+                LOG.warning("User not found on domain, skipping deny...")
             else:
                 message = (_('Failed to deny the access %(access)s to '
                              'CIFS share %(name)s. Reason: %(err)s.') %
@@ -1798,7 +1798,7 @@ class NFSShare(StorageObject):

         status, out = self.get(name, mover_name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("NFS share %s not found. Skip the deletion."),
+            LOG.warning("NFS share %s not found. Skip the deletion.",
                         path)
             return

@@ -1849,7 +1849,7 @@ class NFSShare(StorageObject):
             dup_msg = (r'%(mover_name)s : No such file or directory' %
                        {'mover_name': mover_name})
             if re.search(dup_msg, expt.stdout):
-                LOG.warning(_LW("NFS share %s not found."), name)
+                LOG.warning("NFS share %s not found.", name)
                 return constants.STATUS_NOT_FOUND, None
             else:
                 message = (_('Failed to list NFS share %(name)s on '

@@ -24,7 +24,7 @@ from oslo_utils import units

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LE, _LI, _LW
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins import base as driver
 from manila.share.drivers.dell_emc.plugins.vnx import constants
 from manila.share.drivers.dell_emc.plugins.vnx import object_manager as manager
@@ -250,9 +250,9 @@ class VNXStorageConnection(driver.StorageConnection):
     def delete_share(self, context, share, share_server=None):
         """Delete a share."""
         if share_server is None:
-            LOG.warning(_LW("Driver does not support share deletion without "
-                            "share network specified. Return directly because "
-                            "there is nothing to clean."))
+            LOG.warning("Driver does not support share deletion without "
+                        "share network specified. Return directly because "
+                        "there is nothing to clean.")
             return

         share_proto = share['share_proto']
@@ -545,7 +545,7 @@ class VNXStorageConnection(driver.StorageConnection):
                        ",".join(real_pools))
                 raise exception.InvalidParameterValue(err=msg)

-            LOG.info(_LI("Storage pools: %s will be managed."),
+            LOG.info("Storage pools: %s will be managed.",
                      ",".join(matched_pools))
         else:
             LOG.debug("No storage pool is specified, so all pools "
@@ -711,7 +711,7 @@ class VNXStorageConnection(driver.StorageConnection):

         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Could not setup server.'))
+                LOG.exception('Could not setup server.')
                 server_details = self._construct_backend_details(
                     vdm_name, allocated_interfaces)
                 self.teardown_server(
@@ -799,7 +799,7 @@ class VNXStorageConnection(driver.StorageConnection):
         status, servers = self._get_context('CIFSServer').get_all(
             vdm_name)
         if constants.STATUS_OK != status:
-            LOG.error(_LE('Could not find CIFS server by name: %s.'),
+            LOG.error('Could not find CIFS server by name: %s.',
                       vdm_name)
         else:
             cifs_servers = copy.deepcopy(servers)

@@ -25,7 +25,6 @@ from six.moves.urllib import request as url_request  # pylint: disable=E0611

 from manila import exception
 from manila.i18n import _
-from manila.i18n import _LE
 from manila.share.drivers.dell_emc.plugins.vnx import constants
 from manila import utils

@@ -154,7 +153,7 @@ class SSHConnector(object):
             return out, err
         except processutils.ProcessExecutionError:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Error running SSH command: %(cmd)s.'),
+                LOG.exception('Error running SSH command: %(cmd)s.',
                               {'cmd': command})

     def log_request(self, cmd, out, err):

@@ -24,7 +24,7 @@ import six

 from manila.common import constants as const
 from manila import exception
-from manila.i18n import _, _LE, _LI, _LW
+from manila.i18n import _
 from manila.share.drivers.dell_emc.plugins.vnx import connector
 from manila.share.drivers.dell_emc.plugins.vnx import constants
 from manila.share.drivers.dell_emc.plugins.vnx import utils as vnx_utils
@@ -89,8 +89,8 @@ class StorageObject(object):
                 constants.STATUS_INFO):
             response['maxSeverity'] = constants.STATUS_OK

-            LOG.warning(_LW("Translated status from %(old)s to %(new)s. "
-                            "Message: %(info)s."),
+            LOG.warning("Translated status from %(old)s to %(new)s. "
+                        "Message: %(info)s.",
                         {'old': old_Severity,
                          'new': response['maxSeverity'],
                          'info': response})
@@ -252,8 +252,8 @@ class FileSystem(StorageObject):
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif self._response_validation(
                 response, constants.MSG_FILESYSTEM_EXIST):
-            LOG.warning(_LW("File system %s already exists. "
-                            "Skip the creation."), name)
+            LOG.warning("File system %s already exists. "
+                        "Skip the creation.", name)
             return
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to create file system %(name)s. "
@@ -306,7 +306,7 @@ class FileSystem(StorageObject):
     def delete(self, name):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("File system %s not found. Skip the deletion."),
+            LOG.warning("File system %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -440,8 +440,8 @@ class FileSystem(StorageObject):
         try:
             self._execute_cmd(copy_ckpt_cmd, check_exit_code=True)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed to copy content from snapshot %(snap)s "
-                              "to file system %(filesystem)s."),
+            LOG.exception("Failed to copy content from snapshot %(snap)s "
+                          "to file system %(filesystem)s.",
                           {'snap': snap_name,
                            'filesystem': name})

@@ -575,8 +575,8 @@ class MountPoint(StorageObject):
             self.xml_retry = True
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif self._is_mount_point_already_existent(response):
-            LOG.warning(_LW("Mount Point %(mount)s already exists. "
-                            "Skip the creation."), {'mount': mount_path})
+            LOG.warning("Mount Point %(mount)s already exists. "
+                        "Skip the creation.", {'mount': mount_path})
             return
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_('Failed to create Mount Point %(mount)s for '
@@ -641,8 +641,8 @@ class MountPoint(StorageObject):
             self.xml_retry = True
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif self._is_mount_point_nonexistent(response):
-            LOG.warning(_LW('Mount point %(mount)s on mover %(mover_name)s '
-                            'not found.'),
+            LOG.warning('Mount point %(mount)s on mover %(mover_name)s '
+                        'not found.',
                         {'mount': mount_path, 'mover_name': mover_name})

             return
@@ -816,8 +816,8 @@ class Mover(StorageObject):
         lines = out.strip().split('\n')
         for line in lines:
             if line.strip().split() == header:
-                LOG.info(_LI('Found the header of the command '
-                             '/nas/bin/nas_cel -interconnect -l.'))
+                LOG.info('Found the header of the command '
+                         '/nas/bin/nas_cel -interconnect -l.')
             else:
                 interconn = line.strip().split()
                 if interconn[2] == source and interconn[4] == destination:
@@ -873,7 +873,7 @@ class VDM(StorageObject):
             self.xml_retry = True
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif self._response_validation(response, constants.MSG_VDM_EXIST):
-            LOG.warning(_LW("VDM %(name)s already exists. Skip the creation."),
+            LOG.warning("VDM %(name)s already exists. Skip the creation.",
                         {'name': name})
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to create VDM %(name)s on mover "
@@ -917,7 +917,7 @@ class VDM(StorageObject):
     def delete(self, name):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("VDM %s not found. Skip the deletion."),
+            LOG.warning("VDM %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -1052,8 +1052,8 @@ class Snapshot(StorageObject):
         response = self._send_request(request)

         if self._response_validation(response, constants.MSG_SNAP_EXIST):
-            LOG.warning(_LW("Snapshot %(name)s already exists. "
-                            "Skip the creation."),
+            LOG.warning("Snapshot %(name)s already exists. "
+                        "Skip the creation.",
                         {'name': name})
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to create snapshot %(name)s on "
@@ -1097,7 +1097,7 @@ class Snapshot(StorageObject):
     def delete(self, name):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("Snapshot %s not found. Skip the deletion."),
+            LOG.warning("Snapshot %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -1181,13 +1181,13 @@ class MoverInterface(StorageObject):
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_NAME_EXIST):
-            LOG.warning(_LW("Mover interface name %s already exists. "
-                            "Skip the creation."), name)
+            LOG.warning("Mover interface name %s already exists. "
+                        "Skip the creation.", name)
             return
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_EXIST):
-            LOG.warning(_LW("Mover interface IP %s already exists. "
-                            "Skip the creation."), ip_addr)
+            LOG.warning("Mover interface IP %s already exists. "
+                        "Skip the creation.", ip_addr)
             return
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_INVALID_VLAN_ID):
@@ -1247,8 +1247,8 @@ class MoverInterface(StorageObject):
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif self._response_validation(
                 response, constants.MSG_INTERFACE_NON_EXISTENT):
-            LOG.warning(_LW("Mover interface %s not found. "
-                            "Skip the deletion."), ip_addr)
+            LOG.warning("Mover interface %s not found. "
+                        "Skip the deletion.", ip_addr)
             return
         elif constants.STATUS_OK != response['maxSeverity']:
             message = (_("Failed to delete mover interface %(ip)s on mover "
@@ -1317,8 +1317,8 @@ class DNSDomain(StorageObject):
             self.xml_retry = True
             raise exception.EMCVnxInvalidMoverID(id=mover_id)
         elif constants.STATUS_OK != response['maxSeverity']:
-            LOG.warning(_LW("Failed to delete DNS domain %(name)s. "
-                            "Reason: %(err)s."),
+            LOG.warning("Failed to delete DNS domain %(name)s. "
+                        "Reason: %(err)s.",
                         {'name': name, 'err': response['problems']})


@@ -1509,13 +1509,13 @@ class CIFSServer(StorageObject):
             status, out = self.get(
                 computer_name.lower(), mover_name, is_vdm, self.xml_retry)
             if constants.STATUS_NOT_FOUND == status:
-                LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s "
-                                "not found. Skip the deletion."),
+                LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
+                            "not found. Skip the deletion.",
                             {'name': computer_name, 'mover_name': mover_name})
                 return
         except exception.EMCVnxXMLAPIError:
-            LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s "
-                            "not found. Skip the deletion."),
+            LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
+                        "not found. Skip the deletion.",
                         {'name': computer_name, 'mover_name': mover_name})
             return

@@ -1607,7 +1607,7 @@ class CIFSShare(StorageObject):
     def delete(self, name, mover_name, is_vdm=True):
         status, out = self.get(name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("CIFS share %s not found. Skip the deletion."),
+            LOG.warning("CIFS share %s not found. Skip the deletion.",
                         name)
             return
         elif constants.STATUS_OK != status:
@@ -1685,8 +1685,8 @@ class CIFSShare(StorageObject):
             dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' %
                                  {'domain': domain, 'user': user_name}, re.I)
             if re.search(dup_msg, expt.stdout):
-                LOG.warning(_LW("Duplicate access control entry, "
-                                "skipping allow..."))
+                LOG.warning("Duplicate access control entry, "
+                            "skipping allow...")
             else:
                 message = (_('Failed to allow the access %(access)s to '
                              'CIFS share %(name)s. Reason: %(err)s.') %
@@ -1717,10 +1717,10 @@ class CIFSShare(StorageObject):
                                % {'domain': domain, 'user': user_name}, re.I)

             if re.search(not_found_msg, expt.stdout):
-                LOG.warning(_LW("No access control entry found, "
-                                "skipping deny..."))
+                LOG.warning("No access control entry found, "
+                            "skipping deny...")
             elif re.search(user_err_msg, expt.stdout):
-                LOG.warning(_LW("User not found on domain, skipping deny..."))
+                LOG.warning("User not found on domain, skipping deny...")
             else:
                 message = (_('Failed to deny the access %(access)s to '
                              'CIFS share %(name)s. Reason: %(err)s.') %
@@ -1799,7 +1799,7 @@ class NFSShare(StorageObject):

         status, out = self.get(name, mover_name)
         if constants.STATUS_NOT_FOUND == status:
-            LOG.warning(_LW("NFS share %s not found. Skip the deletion."),
+            LOG.warning("NFS share %s not found. Skip the deletion.",
                         path)
             return

@@ -1850,7 +1850,7 @@ class NFSShare(StorageObject):
             dup_msg = (r'%(mover_name)s : No such file or directory' %
                        {'mover_name': mover_name})
             if re.search(dup_msg, expt.stdout):
-                LOG.warning(_LW("NFS share %s not found."), name)
+                LOG.warning("NFS share %s not found.", name)
                 return constants.STATUS_NOT_FOUND, None
             else:
                 message = (_('Failed to list NFS share %(name)s on '

@ -24,7 +24,6 @@ import six
|
||||
|
||||
from manila.common import constants
|
||||
from manila import exception
|
||||
from manila.i18n import _LI
|
||||
from manila.share.drivers.ganesha import manager as ganesha_manager
|
||||
from manila.share.drivers.ganesha import utils as ganesha_utils
|
||||
|
||||
@ -76,7 +75,7 @@ class GaneshaNASHelper(NASHelperBase):
|
||||
if e.errno != errno.ENOENT or must_exist:
|
||||
raise
|
||||
dirlist = []
|
||||
LOG.info(_LI('Loading Ganesha config from %s.'), dirpath)
|
||||
LOG.info('Loading Ganesha config from %s.', dirpath)
|
||||
conf_files = list(filter(self._confrx.search, dirlist))
|
||||
conf_files.sort()
|
||||
export_template = {}
|
||||
|
@ -24,7 +24,6 @@ import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.share.drivers.ganesha import utils as ganesha_utils
|
||||
from manila import utils
|
||||
|
||||
@ -187,8 +186,8 @@ class GaneshaManager(object):
|
||||
except exception.ProcessExecutionError as e:
|
||||
if makelog:
|
||||
LOG.error(
|
||||
_LE("Error while executing management command on "
|
||||
"Ganesha node %(tag)s: %(msg)s."),
|
||||
("Error while executing management command on "
|
||||
"Ganesha node %(tag)s: %(msg)s."),
|
||||
{'tag': tag, 'msg': msg})
|
||||
raise exception.GaneshaCommandFailure(
|
||||
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
|
||||
@ -324,8 +323,8 @@ class GaneshaManager(object):
|
||||
run_as_root=False)[0]
|
||||
match = re.search('\Aexportid\|(\d+)$', out)
|
||||
if not match:
|
||||
LOG.error(_LE("Invalid export database on "
|
||||
"Ganesha node %(tag)s: %(db)s."),
|
||||
LOG.error("Invalid export database on "
|
||||
"Ganesha node %(tag)s: %(db)s.",
|
||||
{'tag': self.tag, 'db': self.ganesha_db_path})
|
||||
raise exception.InvalidSqliteDB()
|
||||
return int(match.groups()[0])
|
||||
|
@ -30,7 +30,7 @@ from manila.common import constants as const
|
||||
from manila import compute
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LE, _LI, _LW
|
||||
from manila.i18n import _
|
||||
from manila.share import driver
|
||||
from manila.share.drivers import service_instance
|
||||
from manila import utils
|
||||
@ -193,11 +193,11 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
|
||||
if not common_sv_available:
|
||||
time.sleep(sv_fetch_retry_interval)
|
||||
LOG.warning(_LW("Waiting for the common service VM to become "
|
||||
"available. "
|
||||
"Driver is currently uninitialized. "
|
||||
"Share server: %(share_server)s "
|
||||
"Retry interval: %(retry_interval)s"),
|
||||
LOG.warning("Waiting for the common service VM to become "
|
||||
"available. "
|
||||
"Driver is currently uninitialized. "
|
||||
"Share server: %(share_server)s "
|
||||
"Retry interval: %(retry_interval)s",
|
||||
dict(share_server=share_server,
|
||||
retry_interval=sv_fetch_retry_interval))
|
||||
|
||||
@ -293,14 +293,14 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
|
||||
)
|
||||
except exception.ProcessExecutionError as e:
|
||||
LOG.error(_LE("Failed to sync mount files on server '%s'."),
|
||||
LOG.error("Failed to sync mount files on server '%s'.",
|
||||
server_details['instance_id'])
|
||||
raise exception.ShareBackendException(msg=six.text_type(e))
|
||||
try:
|
||||
# Remount it to avoid postponed point of failure
|
||||
self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
|
||||
except exception.ProcessExecutionError as e:
|
||||
LOG.error(_LE("Failed to mount all shares on server '%s'."),
|
||||
LOG.error("Failed to mount all shares on server '%s'.",
|
||||
server_details['instance_id'])
|
||||
raise exception.ShareBackendException(msg=six.text_type(e))
|
||||
|
||||
@ -346,8 +346,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
# Add mount permanently
|
||||
self._sync_mount_temp_and_perm_files(server_details)
|
||||
else:
|
||||
LOG.warning(_LW("Mount point '%(path)s' already exists on "
|
||||
"server '%(server)s'."), log_data)
|
||||
LOG.warning("Mount point '%(path)s' already exists on "
|
||||
"server '%(server)s'.", log_data)
|
||||
except exception.ProcessExecutionError as e:
|
||||
raise exception.ShareBackendException(msg=six.text_type(e))
|
||||
return _mount_device_with_lock()
|
||||
@ -373,8 +373,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
# Remove mount permanently
|
||||
self._sync_mount_temp_and_perm_files(server_details)
|
||||
else:
|
||||
LOG.warning(_LW("Mount point '%(path)s' does not exist on "
|
||||
"server '%(server)s'."), log_data)
|
||||
LOG.warning("Mount point '%(path)s' does not exist on "
|
||||
"server '%(server)s'.", log_data)
|
||||
return _unmount_device_with_lock()
|
||||
|
||||
def _get_mount_path(self, share):
|
||||
@ -449,10 +449,10 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
return volumes_list[0]
|
||||
elif len(volumes_list) > 1:
|
||||
LOG.error(
|
||||
_LE("Expected only one volume in volume list with name "
|
||||
"'%(name)s', but got more than one in a result - "
|
||||
"'%(result)s'."), {
|
||||
'name': volume_name, 'result': volumes_list})
|
||||
"Expected only one volume in volume list with name "
|
||||
"'%(name)s', but got more than one in a result - "
|
||||
"'%(result)s'.", {
|
||||
'name': volume_name, 'result': volumes_list})
|
||||
raise exception.ManilaException(
|
||||
_("Error. Ambiguous volumes for name '%s'") % volume_name)
|
||||
return None
|
||||
@ -479,11 +479,11 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
volume_snapshot = volume_snapshot_list[0]
|
||||
elif len(volume_snapshot_list) > 1:
|
||||
LOG.error(
|
||||
_LE("Expected only one volume snapshot in list with name "
|
||||
"'%(name)s', but got more than one in a result - "
|
||||
"'%(result)s'."), {
|
||||
'name': volume_snapshot_name,
|
||||
'result': volume_snapshot_list})
|
||||
"Expected only one volume snapshot in list with name"
|
||||
"'%(name)s', but got more than one in a result - "
|
||||
"'%(result)s'.", {
|
||||
'name': volume_snapshot_name,
|
||||
'result': volume_snapshot_list})
|
||||
raise exception.ManilaException(
|
||||
_('Error. Ambiguous volume snaphots'))
|
||||
return volume_snapshot
|
||||
@ -501,8 +501,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
volume = self._get_volume(context, share['id'])
|
||||
except exception.VolumeNotFound:
|
||||
LOG.warning(_LW("Volume not found for share %s. "
|
||||
"Possibly already deleted."), share['id'])
|
||||
LOG.warning("Volume not found for share %s. "
|
||||
"Possibly already deleted.", share['id'])
|
||||
volume = None
|
||||
if volume and volume['id'] in attached_volumes:
|
||||
self.compute_api.instance_volume_detach(
|
||||
@ -587,7 +587,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
volume = self._get_volume(context, share['id'])
|
||||
except exception.VolumeNotFound:
|
||||
LOG.info(_LI("Volume not found. Already deleted?"))
|
||||
LOG.info("Volume not found. Already deleted?")
|
||||
volume = None
|
||||
if volume:
|
||||
if volume['status'] == 'in-use':
|
||||
|
@ -24,7 +24,7 @@ from oslo_log import log
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LE, _LW
|
||||
from manila.i18n import _
|
||||
from manila.share.drivers.ganesha import utils as ganesha_utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
@ -205,7 +205,7 @@ class GlusterManager(object):
|
||||
exc.exit_code in error_policy):
|
||||
return
|
||||
if logmsg:
|
||||
LOG.error(_LE("%s: GlusterFS instrumentation failed.") %
|
||||
LOG.error("%s: GlusterFS instrumentation failed." %
|
||||
logmsg)
|
||||
raise exception.GlusterfsException(
|
||||
_("GlusterFS management command '%(cmd)s' failed "
|
||||
@ -248,7 +248,7 @@ class GlusterManager(object):
|
||||
def _get_vol_option_via_info(self, option):
|
||||
"""Get the value of an option set on a GlusterFS volume via volinfo."""
|
||||
args = ('--xml', 'volume', 'info', self.volume)
|
||||
out, err = self.gluster_call(*args, log=_LE("retrieving volume info"))
|
||||
out, err = self.gluster_call(*args, log=("retrieving volume info"))
|
||||
|
||||
if not out:
|
||||
raise exception.GlusterfsException(
|
||||
@ -332,7 +332,7 @@ class GlusterManager(object):
|
||||
:returns: version (as tuple of strings, example: ('3', '6', '0beta2'))
|
||||
"""
|
||||
out, err = self.gluster_call('--version',
|
||||
log=_LE("GlusterFS version query"))
|
||||
log=("GlusterFS version query"))
|
||||
try:
|
||||
owords = out.split()
|
||||
if owords[0] != 'glusterfs':
|
||||
@ -393,7 +393,7 @@ def _mount_gluster_vol(execute, gluster_export, mount_path, ensure=False):
|
||||
execute(*command, run_as_root=True)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
if ensure and 'already mounted' in exc.stderr:
|
||||
LOG.warning(_LW("%s is already mounted."), gluster_export)
|
||||
LOG.warning("%s is already mounted.", gluster_export)
|
||||
else:
|
||||
raise exception.GlusterfsException(
|
||||
'Unable to mount Gluster volume'
|
||||
@ -431,8 +431,8 @@ def _restart_gluster_vol(gluster_mgr):
|
||||
# this odd-behaviour of Gluster-CLI.
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'stop', gluster_mgr.volume, '--mode=script',
|
||||
log=_LE("stopping GlusterFS volume %s") % gluster_mgr.volume)
|
||||
log=("stopping GlusterFS volume %s") % gluster_mgr.volume)
|
||||
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'start', gluster_mgr.volume,
|
||||
log=_LE("starting GlusterFS volume %s") % gluster_mgr.volume)
|
||||
log=("starting GlusterFS volume %s") % gluster_mgr.volume)
|
||||
|
@ -22,7 +22,7 @@ from oslo_log import log
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LE
|
||||
from manila.i18n import _
|
||||
from manila.share.drivers.glusterfs import common
|
||||
from manila.share.drivers.glusterfs import layout
|
||||
|
||||
@ -82,8 +82,8 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
except exception.GlusterfsException:
|
||||
if (self.gluster_manager.
|
||||
get_vol_option('features.quota')) != 'on':
|
||||
LOG.exception(_LE("Error in tuning GlusterFS volume to enable "
|
||||
"creation of shares of specific size."))
|
||||
LOG.exception("Error in tuning GlusterFS volume to enable "
|
||||
"creation of shares of specific size.")
|
||||
raise
|
||||
|
||||
self._ensure_gluster_vol_mounted()
|
||||
@ -106,7 +106,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
self.gluster_manager.export, mount_path,
|
||||
ensure=True)
|
||||
except exception.GlusterfsException:
|
||||
LOG.exception(_LE('Could not mount the Gluster volume %s'),
|
||||
LOG.exception('Could not mount the Gluster volume %s',
|
||||
self.gluster_manager.volume)
|
||||
raise
|
||||
|
||||
@ -152,7 +152,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
exc = exception.GlusterfsException(exc)
|
||||
if isinstance(exc, exception.GlusterfsException):
|
||||
self._cleanup_create_share(local_share_path, share['name'])
|
||||
LOG.error(_LE('Unable to create share %s'), share['name'])
|
||||
LOG.error('Unable to create share %s', share['name'])
|
||||
raise exc
|
||||
|
||||
comp_share = self.gluster_manager.components.copy()
|
||||
@ -170,9 +170,9 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
try:
|
||||
self.driver._execute(*cmd, run_as_root=True)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_LE('Cannot cleanup share, %s, that errored out '
|
||||
'during its creation, but exists in GlusterFS '
|
||||
'volume.'), share_name)
|
||||
LOG.error('Cannot cleanup share, %s, that errored out '
|
||||
'during its creation, but exists in GlusterFS '
|
||||
'volume.', share_name)
|
||||
raise exception.GlusterfsException(exc)
|
||||
|
||||
def delete_share(self, context, share, share_server=None):
|
||||
@ -182,7 +182,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
try:
|
||||
self.driver._execute(*cmd, run_as_root=True)
|
||||
except exception.ProcessExecutionError:
|
||||
LOG.exception(_LE('Unable to delete share %s'), share['name'])
|
||||
LOG.exception('Unable to delete share %s', share['name'])
|
||||
raise
|
||||
|
||||
def ensure_share(self, context, share, share_server=None):
|
||||
|
@ -28,7 +28,7 @@ from oslo_log import log
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LE, _LI, _LW
|
||||
from manila.i18n import _
|
||||
from manila.share.drivers.glusterfs import common
|
||||
from manila.share.drivers.glusterfs import layout
|
||||
from manila import utils
|
||||
@ -129,8 +129,8 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
exceptions[srvaddr] = six.text_type(exc)
|
||||
if exceptions:
|
||||
for srvaddr, excmsg in exceptions.items():
|
||||
LOG.error(_LE("'gluster version' failed on server "
|
||||
"%(server)s with: %(message)s"),
|
||||
LOG.error("'gluster version' failed on server "
|
||||
"%(server)s with: %(message)s",
|
||||
{'server': srvaddr, 'message': excmsg})
|
||||
raise exception.GlusterfsException(_(
|
||||
"'gluster version' failed on servers %s") % (
|
||||
@ -143,9 +143,9 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
gluster_version_min_str = '.'.join(
|
||||
six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
|
||||
for srvaddr in notsupp_servers:
|
||||
LOG.error(_LE("GlusterFS version %(version)s on server "
|
||||
"%(server)s is not supported, "
|
||||
"minimum requirement: %(minvers)s"),
|
||||
LOG.error("GlusterFS version %(version)s on server "
|
||||
"%(server)s is not supported, "
|
||||
"minimum requirement: %(minvers)s",
|
||||
{'server': srvaddr,
|
||||
'version': '.'.join(glusterfs_versions[srvaddr]),
|
||||
'minvers': gluster_version_min_str})
|
||||
@ -167,8 +167,8 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
LOG.info(_LI("Found %d Gluster volumes allocated for Manila."
|
||||
), len(gluster_volumes_initial))
|
||||
LOG.info("Found %d Gluster volumes allocated for Manila.",
|
||||
len(gluster_volumes_initial))
|
||||
|
||||
self._check_mount_glusterfs()
|
||||
|
||||
@ -203,10 +203,10 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
for srvaddr in self.configuration.glusterfs_servers:
|
||||
gluster_mgr = self._glustermanager(srvaddr, False)
|
||||
if gluster_mgr.user:
|
||||
logmsg = _LE("Retrieving volume list "
|
||||
"on host %s") % gluster_mgr.host
|
||||
logmsg = ("Retrieving volume list "
|
||||
"on host %s") % gluster_mgr.host
|
||||
else:
|
||||
logmsg = _LE("Retrieving volume list")
|
||||
logmsg = ("Retrieving volume list")
|
||||
out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg)
|
||||
for volname in out.split("\n"):
|
||||
patmatch = self.volume_pattern.match(volname)
|
||||
@ -251,17 +251,17 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
|
||||
if not unused_vols:
|
||||
# No volumes available for use as share. Warn user.
|
||||
LOG.warning(_LW("No unused gluster volumes available for use as "
|
||||
"share! Create share won't be supported unless "
|
||||
"existing shares are deleted or some gluster "
|
||||
"volumes are created with names matching "
|
||||
"'glusterfs_volume_pattern'."))
|
||||
LOG.warning("No unused gluster volumes available for use as "
|
||||
"share! Create share won't be supported unless "
|
||||
"existing shares are deleted or some gluster "
|
||||
"volumes are created with names matching "
|
||||
"'glusterfs_volume_pattern'.")
|
||||
else:
|
||||
LOG.info(_LI("Number of gluster volumes in use: "
|
||||
"%(inuse-numvols)s. Number of gluster volumes "
|
||||
"available for use as share: %(unused-numvols)s"),
|
||||
LOG.info("Number of gluster volumes in use: "
|
||||
"%(inuse-numvols)s. Number of gluster volumes "
|
||||
"available for use as share: %(unused-numvols)s",
|
||||
{'inuse-numvols': len(self.gluster_used_vols),
|
||||
'unused-numvols': len(unused_vols)})
|
||||

# volmap is the data structure used to categorize and sort
# the unused volumes. It's a nested dictionary of structure
@ -385,7 +385,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
try:
vol = self._pop_gluster_vol(share['size'])
except exception.GlusterfsException:
msg = (_LE("Error creating share %(share_id)s"),
msg = ("Error creating share %(share_id)s",
{'share_id': share['id']})
LOG.error(msg)
raise
@ -401,7 +401,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):

# For native protocol, the export_location should be of the form:
# server:/volname
LOG.info(_LI("export_location sent back from create_share: %s"),
LOG.info("export_location sent back from create_share: %s",
export)
return export

@ -436,8 +436,8 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):

self._push_gluster_vol(gmgr.qualified)
except exception.GlusterfsException:
msg = (_LE("Error during delete_share request for "
"share %(share_id)s"), {'share_id': share['id']})
msg = ("Error during delete_share request for "
"share %(share_id)s", {'share_id': share['id']})
LOG.error(msg)
raise

@ -449,7 +449,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
out, err = gluster_mgr.gluster_call(
*args,
log=_LE("Retrieving snapshot list"))
log=("Retrieving snapshot list"))
snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
if len(snapgrep) != 1:
msg = (_("Failed to identify backing GlusterFS object "
@ -493,7 +493,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
for args in args_tuple:
out, err = old_gmgr.gluster_call(
*args,
log=_LE("Creating share from snapshot"))
log=("Creating share from snapshot"))

# Get a manager for the new volume/share.
comp_vol = old_gmgr.components.copy()
@ -509,7 +509,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
('start', []))
for op, opargs in argseq:
args = ['volume', op, gmgr.volume] + opargs
gmgr.gluster_call(*args, log=_LE("Creating share from snapshot"))
gmgr.gluster_call(*args, log=("Creating share from snapshot"))

self.gluster_used_vols.add(gmgr.qualified)
self.private_storage.update(share['id'], {'volume': gmgr.qualified})
@ -528,7 +528,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
gluster_mgr.volume)
out, err = gluster_mgr.gluster_call(
*args,
log=_LE("Retrieving volume info"))
log=("Retrieving volume info"))

if not out:
raise exception.GlusterfsException(
@ -570,7 +570,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
'--mode=script')
out, err = gluster_mgr.gluster_call(
*args,
log=_LE("Error deleting snapshot"))
log=("Error deleting snapshot"))

if not out:
raise exception.GlusterfsException(

@ -20,7 +20,7 @@ from oslo_log import log

from manila.common import constants as const
from manila import exception
from manila.i18n import _, _LW
from manila.i18n import _
from manila import utils

LOG = log.getLogger(__name__)
@ -242,12 +242,12 @@ class NFSHelper(NASHelperBase):
(const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW))
except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel):
LOG.warning(_LW(
LOG.warning(
"Unsupported access level %(level)s or access type "
"%(type)s, skipping removal of access rule to "
"%(to)s.") % {'level': access['access_level'],
'type': access['access_type'],
'to': access['access_to']})
"%(to)s." % {'level': access['access_level'],
'type': access['access_type'],
'to': access['access_to']})
continue
self._ssh_exec(server, ['sudo', 'exportfs', '-u',
':'.join((access['access_to'], local_path))])
@ -260,12 +260,12 @@ class NFSHelper(NASHelperBase):
re.escape(local_path) + '[\s\n]*' + re.escape(
access['access_to']), out)
if found_item is not None:
LOG.warning(_LW("Access rule %(type)s:%(to)s already "
"exists for share %(name)s") % {
'to': access['access_to'],
'type': access['access_type'],
'name': share_name
})
LOG.warning("Access rule %(type)s:%(to)s already "
"exists for share %(name)s" % {
'to': access['access_to'],
'type': access['access_type'],
'name': share_name
})
else:
rules_options = '%s,no_subtree_check'
if access['access_level'] == const.ACCESS_LEVEL_RW:
@ -433,8 +433,8 @@ class CIFSHelperIPAccess(CIFSHelperBase):
self._ssh_exec(
server, ['sudo', 'net', 'conf', 'delshare', share_name])
except exception.ProcessExecutionError as e:
LOG.warning(_LW("Caught error trying delete share: %(error)s, try"
"ing delete it forcibly."), {'error': e.stderr})
LOG.warning("Caught error trying delete share: %(error)s, try"
"ing delete it forcibly.", {'error': e.stderr})
self._ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share',
share_name])

@ -23,7 +23,7 @@ import six

from manila.common import constants
from manila import exception
from manila.i18n import _, _LI, _LW
from manila.i18n import _
from manila.share import driver
from manila.share import utils

@ -280,10 +280,10 @@ class HitachiHNASDriver(driver.ShareDriver):

for rule in delete_rules:
if rule['access_type'].lower() != 'user':
LOG.warning(_LW('Only USER access type is allowed for '
'CIFS. %(entity_type)s '
'provided %(share)s with '
'protocol %(proto)s.'),
LOG.warning('Only USER access type is allowed for '
'CIFS. %(entity_type)s '
'provided %(share)s with '
'protocol %(proto)s.',
{'entity_type': entity_type.capitalize(),
'share': share_or_snapshot['id'],
'proto': share_proto})
@ -411,7 +411,7 @@ class HitachiHNASDriver(driver.ShareDriver):
'snap_id': snapshot['id']})

export_locations = self._create_snapshot(hnas_share_id, snapshot)
LOG.info(_LI("Snapshot %(id)s successfully created."),
LOG.info("Snapshot %(id)s successfully created.",
{'id': snapshot['id']})

output = {
@ -443,7 +443,7 @@ class HitachiHNASDriver(driver.ShareDriver):
self._delete_snapshot(snapshot['share'],
hnas_share_id, hnas_snapshot_id)

LOG.info(_LI("Snapshot %(id)s successfully deleted."),
LOG.info("Snapshot %(id)s successfully deleted.",
{'id': snapshot['id']})

def create_share_from_snapshot(self, context, share, snapshot,
@ -598,8 +598,8 @@ class HitachiHNASDriver(driver.ShareDriver):
{'shr_id': share['id']})

self._extend_share(hnas_share_id, share, new_size)
LOG.info(_LI("Share %(shr_id)s successfully extended to "
"%(shr_size)s."),
LOG.info("Share %(shr_id)s successfully extended to "
"%(shr_size)s.",
{'shr_id': share['id'],
'shr_size': six.text_type(new_size)})

@ -639,7 +639,7 @@ class HitachiHNASDriver(driver.ShareDriver):
'mount_snapshot_support': True,
}

LOG.info(_LI("HNAS Capabilities: %(data)s."),
LOG.info("HNAS Capabilities: %(data)s.",
{'data': six.text_type(data)})

super(HitachiHNASDriver, self)._update_share_stats(data)
@ -719,8 +719,8 @@ class HitachiHNASDriver(driver.ShareDriver):

if share['share_proto'].lower() == 'nfs':
# 10.0.0.1:/shares/example
LOG.info(_LI("Share %(shr_path)s will be managed with ID "
"%(shr_id)s."),
LOG.info("Share %(shr_path)s will be managed with ID "
"%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

@ -764,8 +764,8 @@ class HitachiHNASDriver(driver.ShareDriver):
"Share ID %(share_id)s", {'hnas_id': hnas_share_id,
'share_id': share['id']})

LOG.info(_LI("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s."),
LOG.info("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

@ -779,13 +779,13 @@ class HitachiHNASDriver(driver.ShareDriver):
self.private_storage.delete(share['id'])

if len(share['export_locations']) == 0:
LOG.info(_LI("The share with ID %(shr_id)s is no longer being "
"managed."), {'shr_id': share['id']})
LOG.info("The share with ID %(shr_id)s is no longer being "
"managed.", {'shr_id': share['id']})
else:
LOG.info(_LI("The share with current path %(shr_path)s and ID "
"%(shr_id)s is no longer being managed."),
LOG.info("The share with current path %(shr_path)s and ID "
"%(shr_id)s is no longer being managed.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

def shrink_share(self, share, new_size, share_server=None):
"""Shrinks a share to new size.
@ -801,8 +801,8 @@ class HitachiHNASDriver(driver.ShareDriver):
{'shr_id': share['id']})

self._shrink_share(hnas_share_id, share, new_size)
LOG.info(_LI("Share %(shr_id)s successfully shrunk to "
"%(shr_size)sG."),
LOG.info("Share %(shr_id)s successfully shrunk to "
"%(shr_size)sG.",
{'shr_id': share['id'],
'shr_size': six.text_type(new_size)})

@ -836,12 +836,12 @@ class HitachiHNASDriver(driver.ShareDriver):
try:
self.hnas.tree_clone(src_path, dest_path)
except exception.HNASNothingToCloneException:
LOG.warning(_LW("Source directory is empty, creating an empty "
"directory."))
LOG.warning("Source directory is empty, creating an empty "
"directory.")

LOG.info(_LI("Share %(share)s successfully reverted to snapshot "
"%(snapshot)s."), {'share': snapshot['share_id'],
'snapshot': snapshot['id']})
LOG.info("Share %(share)s successfully reverted to snapshot "
"%(snapshot)s.", {'share': snapshot['share_id'],
'snapshot': snapshot['id']})

def _get_hnas_share_id(self, share_id):
hnas_id = self.private_storage.get(share_id, 'hnas_id')
@ -1056,8 +1056,8 @@ class HitachiHNASDriver(driver.ShareDriver):
try:
self.hnas.tree_clone(src_path, dest_path)
except exception.HNASNothingToCloneException:
LOG.warning(_LW("Source directory is empty, creating an empty "
"directory."))
LOG.warning("Source directory is empty, creating an empty "
"directory.")
self.hnas.create_directory(dest_path)
finally:
if share_proto.lower() == 'nfs':
@ -1125,8 +1125,8 @@ class HitachiHNASDriver(driver.ShareDriver):
try:
self.hnas.tree_clone(src_path, dest_path)
except exception.HNASNothingToCloneException:
LOG.warning(_LW("Source directory is empty, exporting "
"directory."))
LOG.warning("Source directory is empty, exporting "
"directory.")

self._check_protocol(share['id'], share['share_proto'])

@ -1348,8 +1348,8 @@ class HitachiHNASDriver(driver.ShareDriver):
try:
self._ensure_snapshot(snapshot, hnas_snapshot_id)
except exception.HNASItemNotFoundException:
LOG.warning(_LW("Export does not exist for snapshot %s, "
"creating a new one."), snapshot['id'])
LOG.warning("Export does not exist for snapshot %s, "
"creating a new one.", snapshot['id'])
self._create_export(hnas_share_id,
snapshot['share']['share_proto'],
snapshot_id=hnas_snapshot_id)
@ -1362,8 +1362,8 @@ class HitachiHNASDriver(driver.ShareDriver):
is_snapshot=True)
output['export_locations'] = export_locations

LOG.info(_LI("Snapshot %(snap_path)s for share %(shr_id)s was "
"successfully managed with ID %(snap_id)s."),
LOG.info("Snapshot %(snap_path)s for share %(shr_id)s was "
"successfully managed with ID %(snap_id)s.",
{'snap_path': snapshot['provider_location'],
'shr_id': snapshot['share_id'],
'snap_id': snapshot['id']})
@ -1375,9 +1375,9 @@ class HitachiHNASDriver(driver.ShareDriver):

:param snapshot: Snapshot that will be unmanaged.
"""
LOG.info(_LI("The snapshot with ID %(snap_id)s from share "
"%(share_id)s is no longer being managed by Manila. "
"However, it is not deleted and can be found in HNAS."),
LOG.info("The snapshot with ID %(snap_id)s from share "
"%(share_id)s is no longer being managed by Manila. "
"However, it is not deleted and can be found in HNAS.",
{'snap_id': snapshot['id'],
'share_id': snapshot['share_id']})

@ -24,7 +24,7 @@ import os
import time

from manila import exception
from manila.i18n import _, _LE, _LW
from manila.i18n import _
from manila import utils as mutils

LOG = log.getLogger(__name__)
@ -98,8 +98,8 @@ class HNASSSHBackend(object):
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr:
LOG.warning(_LW("Export %s does not exist on "
"backend anymore."), name)
LOG.warning("Export %s does not exist on "
"backend anymore.", name)
else:
msg = _("Could not delete NFS export %s.") % name
LOG.exception(msg)
@ -128,8 +128,8 @@ class HNASSSHBackend(object):
self._execute(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == 1:
LOG.warning(_LW("CIFS share %s does not exist on "
"backend anymore."), name)
LOG.warning("CIFS share %s does not exist on "
"backend anymore.", name)
else:
msg = _("Could not delete CIFS share %s.") % name
LOG.exception(msg)
@ -232,18 +232,18 @@ class HNASSSHBackend(object):
except processutils.ProcessExecutionError as e:
if ('not listed as a user' in e.stderr or
'Could not delete user/group' in e.stderr):
LOG.warning(_LW('User %(user)s already not allowed to access '
'%(entity_type)s %(name)s.'), {
'entity_type': entity_type,
'user': user,
'name': name
})
LOG.warning('User %(user)s already not allowed to access '
'%(entity_type)s %(name)s.', {
'entity_type': entity_type,
'user': user,
'name': name
})
else:
msg = _("Could not delete access of user %(user)s to "
"%(entity_type)s %(name)s.") % {
'user': user,
'name': name,
'entity_type': entity_type,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
@ -312,8 +312,8 @@ class HNASSSHBackend(object):
if now > deadline:
command = ['tree-clone-job-abort', job_id]
self._execute(command)
LOG.error(_LE("Timeout in snapshot creation from "
"source path %s.") % src_path)
LOG.error("Timeout in snapshot creation from "
"source path %s." % src_path)
msg = _("Share snapshot of source path %s "
"was not created.") % src_path
raise exception.HNASBackendException(msg=msg)
@ -332,7 +332,7 @@ class HNASSSHBackend(object):
{'src': src_path,
'dest': dest_path})
else:
LOG.error(_LE('Error creating snapshot of source path %s.'),
LOG.error('Error creating snapshot of source path %s.',
src_path)
msg = _('Snapshot of source path %s was not '
'created.') % src_path
@ -345,8 +345,8 @@ class HNASSSHBackend(object):
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr:
LOG.warning(_LW("Attempted to delete path %s "
"but it does not exist."), path)
LOG.warning("Attempted to delete path %s "
"but it does not exist.", path)
else:
msg = _("Could not submit tree delete job to delete path "
"%s.") % path
@ -449,7 +449,7 @@ class HNASSSHBackend(object):
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr:
LOG.warning(_LW("Share %s does not exist."), vvol_name)
LOG.warning("Share %s does not exist.", vvol_name)
else:
msg = _("Failed to delete vvol %s.") % vvol_name
LOG.exception(msg)
@ -699,8 +699,8 @@ class HNASSSHBackend(object):
LOG.debug(msg)
raise exception.HNASDirectoryNotEmpty(msg=msg)
elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr:
LOG.warning(_LW("Attempted to delete path %s but it does "
"not exist."), path)
LOG.warning("Attempted to delete path %s but it does "
"not exist.", path)
elif 'Current file system invalid: VolumeNotFound' in e.stderr:
msg = _("Command to delete empty directory %s failed due "
"to context change.") % path

@ -20,7 +20,7 @@ from oslo_utils import units

from manila.common import constants
from manila import exception
from manila.i18n import _, _LE, _LI
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.hitachi.hsp import rest

@ -92,7 +92,7 @@ class HitachiHSPDriver(driver.ShareDriver):
}],
}

LOG.info(_LI("Hitachi HSP Capabilities: %(data)s."),
LOG.info("Hitachi HSP Capabilities: %(data)s.",
{'data': data})
super(HitachiHSPDriver, self)._update_share_stats(data)

@ -111,7 +111,7 @@ class HitachiHSPDriver(driver.ShareDriver):
except exception.HSPBackendException:
with excutils.save_and_reraise_exception():
self.hsp.delete_file_system(filesystem_id)
msg = _LE("Could not create share %s on HSP.")
msg = ("Could not create share %s on HSP.")
LOG.exception(msg, share['id'])

uri = self.hsp_host + ':/' + share['id']
@ -133,7 +133,7 @@ class HitachiHSPDriver(driver.ShareDriver):
filesystem_id = self.hsp.get_file_system(share['id'])['id']
hsp_share_id = self.hsp.get_share(filesystem_id)['id']
except exception.HSPItemNotFoundException:
LOG.info(_LI("Share %(shr)s already removed from backend."),
LOG.info("Share %(shr)s already removed from backend.",
{'shr': share['id']})

if hsp_share_id:
@ -278,8 +278,8 @@ class HitachiHSPDriver(driver.ShareDriver):
% share['id'])
raise exception.HSPBackendException(msg=msg)

LOG.info(_LI("Share %(shr_id)s successfully extended to "
"%(shr_size)sG."),
LOG.info("Share %(shr_id)s successfully extended to "
"%(shr_size)sG.",
{'shr_id': share['id'],
'shr_size': new_size})

@ -299,8 +299,8 @@ class HitachiHSPDriver(driver.ShareDriver):
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])

LOG.info(_LI("Share %(shr_id)s successfully shrunk to "
"%(shr_size)sG."),
LOG.info("Share %(shr_id)s successfully shrunk to "
"%(shr_size)sG.",
{'shr_id': share['id'],
'shr_size': new_size})

@ -333,8 +333,8 @@ class HitachiHSPDriver(driver.ShareDriver):

file_system = self.hsp.get_file_system(share['id'])

LOG.info(_LI("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s."),
LOG.info("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

@ -357,8 +357,8 @@ class HitachiHSPDriver(driver.ShareDriver):

self.private_storage.delete(share['id'])

LOG.info(_LI("The share with current path %(shr_path)s and ID "
"%(shr_id)s is no longer being managed."),
LOG.info("The share with current path %(shr_path)s and ID "
"%(shr_id)s is no longer being managed.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

@ -27,7 +27,7 @@ import six

from manila.common import config
from manila import exception
from manila.i18n import _, _LI
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.hpe import hpe_3par_mediator
from manila.share import share_types
@ -237,7 +237,7 @@ class HPE3ParShareDriver(driver.ShareDriver):
def do_setup(self, context):
"""Any initialization the share driver does while starting."""

LOG.info(_LI("Starting share driver %(driver_name)s (%(version)s)"),
LOG.info("Starting share driver %(driver_name)s (%(version)s)",
{'driver_name': self.__class__.__name__,
'version': self.VERSION})

@ -637,8 +637,8 @@ class HPE3ParShareDriver(driver.ShareDriver):

if not self._hpe3par:
LOG.info(
_LI("Skipping capacity and capabilities update. Setup has not "
"completed."))
"Skipping capacity and capabilities update. Setup has not "
"completed.")
else:
for fpg in self.fpgs:
fpg_status = self._hpe3par.get_fpg_status(fpg)

@ -25,8 +25,8 @@ import six

from manila.data import utils as data_utils
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila import utils

hpe3parclient = importutils.try_import("hpe3parclient")
if hpe3parclient:
@ -167,14 +167,14 @@ class HPE3ParMediator(object):
LOG.exception(msg)
raise exception.ShareBackendException(message=msg)

LOG.info(_LI("HPE3ParMediator %(version)s, "
"hpe3parclient %(client_version)s"),
LOG.info("HPE3ParMediator %(version)s, "
"hpe3parclient %(client_version)s",
{"version": self.VERSION,
"client_version": hpe3parclient.get_version_string()})

try:
wsapi_version = self._client.getWsApiVersion()['build']
LOG.info(_LI("3PAR WSAPI %s"), wsapi_version)
LOG.info("3PAR WSAPI %s", wsapi_version)
except Exception as e:
msg = (_('Failed to get 3PAR WSAPI version: %s') %
six.text_type(e))
@ -200,7 +200,7 @@ class HPE3ParMediator(object):
try:
self._client.http.unauthenticate()
except Exception as e:
msg = _LW("Failed to Logout from 3PAR (%(url)s) because %(err)s")
msg = ("Failed to Logout from 3PAR (%(url)s) because %(err)s")
LOG.warning(msg, {'url': self.hpe3par_api_url,
'err': six.text_type(e)})
# don't raise exception on logout()
@ -346,8 +346,8 @@ class HPE3ParMediator(object):
if nfs_options is None:
nfs_options = extra_specs.get('hp3par:nfs_options')
if nfs_options:
msg = _LW("hp3par:nfs_options is deprecated. Use "
"hpe3par:nfs_options instead.")
msg = ("hp3par:nfs_options is deprecated. Use "
"hpe3par:nfs_options instead.")
LOG.warning(msg)

if nfs_options:
@ -391,8 +391,8 @@ class HPE3ParMediator(object):
comment=comment)

if 'hp3par_flash_cache' in extra_specs:
msg = _LW("hp3par_flash_cache is deprecated. Use "
"hpe3par_flash_cache instead.")
msg = ("hp3par_flash_cache is deprecated. Use "
"hpe3par_flash_cache instead.")
LOG.warning(msg)

if protocol == 'nfs':
@ -425,8 +425,8 @@ class HPE3ParMediator(object):
if opt_value is None:
opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt)
if opt_value:
msg = _LW("hp3par:smb_* is deprecated. Use "
"hpe3par:smb_* instead.")
msg = ("hp3par:smb_* is deprecated. Use "
"hpe3par:smb_* instead.")
LOG.warning(msg)

if opt_value:
@ -653,10 +653,10 @@ class HPE3ParMediator(object):

if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username
or not self.hpe3par_cifs_admin_access_password):
LOG.warning(_LW("hpe3par_cifs_admin_access_username and "
"hpe3par_cifs_admin_access_password must be "
"provided in order for CIFS shares created from "
"snapshots to be writable."))
LOG.warning("hpe3par_cifs_admin_access_username and "
"hpe3par_cifs_admin_access_password must be "
"provided in order for CIFS shares created from "
"snapshots to be writable.")
return self.create_share(
orig_project_id,
share_id,
@ -735,8 +735,8 @@ class HPE3ParMediator(object):
protocol, fpg, vfs, fstore, comment)

except Exception as e:
msg = _LE('Exception during mount and copy from RO snapshot '
'to RW share: %s')
msg = ('Exception during mount and copy from RO snapshot '
'to RW share: %s')
LOG.error(msg, e)
self._delete_share(share_name, protocol, fpg, vfs, fstore)
raise
@ -862,8 +862,8 @@ class HPE3ParMediator(object):
self._update_capacity_quotas(
fstore, 0, share_size, fpg, vfs)
except Exception as e:
msg = _LW('Exception during cleanup of deleted '
'share %(share)s in filestore %(fstore)s: %(e)s')
msg = ('Exception during cleanup of deleted '
'share %(share)s in filestore %(fstore)s: %(e)s')
data = {
'fstore': fstore,
'share': share_name,
@ -878,10 +878,10 @@ class HPE3ParMediator(object):
# return out and log a warning.
if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username
or not self.hpe3par_cifs_admin_access_password):
LOG.warning(_LW("hpe3par_cifs_admin_access_username and "
"hpe3par_cifs_admin_access_password must be "
"provided in order for the file tree to be "
"properly deleted."))
LOG.warning("hpe3par_cifs_admin_access_username and "
"hpe3par_cifs_admin_access_password must be "
"provided in order for the file tree to be "
"properly deleted.")
return

mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name)
@ -978,8 +978,8 @@ class HPE3ParMediator(object):
try:
utils.execute('mkdir', mount_location, run_as_root=True)
except Exception as err:
message = (_LW("There was an error creating mount directory: "
"%s. The nested file tree will not be deleted."),
message = ("There was an error creating mount directory: "
"%s. The nested file tree will not be deleted.",
six.text_type(err))
LOG.warning(message)

@ -1004,8 +1004,8 @@ class HPE3ParMediator(object):
protocol, fpg, vfs, fstore, share_ip)
self._mount_share(protocol, mount_location, mount_dir)
except Exception as err:
message = (_LW("There was an error mounting the super share: "
"%s. The nested file tree will not be deleted."),
message = ("There was an error mounting the super share: "
"%s. The nested file tree will not be deleted.",
six.text_type(err))
LOG.warning(message)

@ -1013,8 +1013,8 @@ class HPE3ParMediator(object):
try:
utils.execute('umount', mount_location, run_as_root=True)
except Exception as err:
message = _LW("There was an error unmounting the share at "
"%(mount_location)s: %(error)s")
message = ("There was an error unmounting the share at "
"%(mount_location)s: %(error)s")
msg_data = {
'mount_location': mount_location,
'error': six.text_type(err),
@ -1025,8 +1025,8 @@ class HPE3ParMediator(object):
try:
utils.execute('rm', '-rf', directory, run_as_root=True)
except Exception as err:
message = (_LW("There was an error removing the share: "
"%s. The nested file tree will not be deleted."),
message = ("There was an error removing the share: "
"%s. The nested file tree will not be deleted.",
six.text_type(err))
LOG.warning(message)

@ -1212,8 +1212,8 @@ class HPE3ParMediator(object):
self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed')
except Exception:
# Remove already happened so only log this.
LOG.exception(_LE('Unexpected exception calling startfsnapclean '
'for FPG %(fpg)s.'), {'fpg': fpg})
LOG.exception('Unexpected exception calling startfsnapclean '
'for FPG %(fpg)s.', {'fpg': fpg})

@staticmethod
def _validate_access_type(protocol, access_type):

@ -17,7 +17,6 @@ import copy

from oslo_log import log

from manila.i18n import _LE
from manila.share.drivers.huawei import constants
from manila.share import share_types

@ -66,8 +65,8 @@ def _get_opts_from_specs(specs):
words = value.split()

if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error(_LE("Extra specs must be specified as "
"capabilities:%s='<is> True'."), key)
LOG.error("Extra specs must be specified as "
"capabilities:%s='<is> True'.", key)
else:
opts[key] = words[1].lower()

@ -31,7 +31,7 @@ import six
from manila.common import constants as common_constants
from manila.data import utils as data_utils
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila import rpc
from manila.share.drivers.huawei import base as driver
from manila.share.drivers.huawei import constants
@ -275,7 +275,7 @@ class V3StorageConnection(driver.HuaweiBase):
snapshot_name = "share_snapshot_" + snap_name
snap_id = self.helper._create_snapshot(sharefsid,
snapshot_name)
LOG.info(_LI('Creating snapshot id %s.'), snap_id)
LOG.info('Creating snapshot id %s.', snap_id)
return snapshot_name.replace("-", "_")

def delete_snapshot(self, snapshot, share_server=None):
@ -286,8 +286,8 @@ class V3StorageConnection(driver.HuaweiBase):
sharefsid = self.helper.get_fsid_by_name(snapshot['share_name'])

if sharefsid is None:
LOG.warning(_LW('Delete snapshot share id %s fs has been '
'deleted.'), snap_name)
LOG.warning('Delete snapshot share id %s fs has been '
'deleted.', snap_name)
return

snapshot_id = self.helper._get_snapshot_id(sharefsid, snap_name)
@ -297,7 +297,7 @@ class V3StorageConnection(driver.HuaweiBase):
if snapshot_flag:
self.helper._delete_snapshot(snapshot_id)
else:
LOG.warning(_LW("Can not find snapshot %s on array."), snap_name)
LOG.warning("Can not find snapshot %s on array.", snap_name)

def update_share_stats(self, stats_dict):
"""Retrieve status info from share group."""
@ -358,13 +358,13 @@ class V3StorageConnection(driver.HuaweiBase):
share = self.helper._get_share_by_name(share_name, share_url_type)

if not share:
LOG.warning(_LW('The share was not found. Share name:%s'),
LOG.warning('The share was not found. Share name:%s',
share_name)
fsid = self.helper.get_fsid_by_name(share_name)
if fsid:
self.helper._delete_fs(fsid)
return
LOG.warning(_LW('The filesystem was not found.'))
LOG.warning('The filesystem was not found.')
return

share_id = share['ID']
@ -452,8 +452,8 @@ class V3StorageConnection(driver.HuaweiBase):
try:
os.rmdir(item['mount_src'])
except Exception as err:
LOG.warning(_LW('Failed to remove temp file. File path: '
'%(file_path)s. Reason: %(err)s.'),
LOG.warning('Failed to remove temp file. File path: '
'%(file_path)s. Reason: %(err)s.',
{'file_path': item['mount_src'],
'err': err})

@ -467,8 +467,8 @@ class V3StorageConnection(driver.HuaweiBase):
self.allow_access(old_share, old_access)
except exception.ManilaException as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to add access to share %(name)s. '
'Reason: %(err)s.'),
LOG.error('Failed to add access to share %(name)s. '
'Reason: %(err)s.',
{'name': old_share['name'],
'err': err})

@ -478,8 +478,8 @@ class V3StorageConnection(driver.HuaweiBase):
self.mount_share_to_host(old_share, old_access)
except exception.ShareMountException as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to mount old share %(name)s. '
'Reason: %(err)s.'),
LOG.error('Failed to mount old share %(name)s. '
'Reason: %(err)s.',
{'name': old_share['name'],
'err': err})

@ -489,8 +489,8 @@ class V3StorageConnection(driver.HuaweiBase):
except Exception as err:
with excutils.save_and_reraise_exception():
self.umount_share_from_host(old_share)
LOG.error(_LE('Failed to mount new share %(name)s. '
'Reason: %(err)s.'),
LOG.error('Failed to mount new share %(name)s. '
'Reason: %(err)s.',
{'name': new_share['name'],
'err': err})

@ -500,8 +500,8 @@ class V3StorageConnection(driver.HuaweiBase):
try:
self.umount_share_from_host(item)
except exception.ShareUmountException as err:
LOG.warning(_LW('Failed to unmount share %(name)s. '
'Reason: %(err)s.'),
LOG.warning('Failed to unmount share %(name)s. '
'Reason: %(err)s.',
{'name': item['name'],
'err': err})

@ -573,7 +573,7 @@ class V3StorageConnection(driver.HuaweiBase):
if copy.get_progress()['total_progress'] == 100:
copy_finish = True
except Exception as err:
LOG.error(_LE("Failed to copy data, reason: %s."), err)
LOG.error("Failed to copy data, reason: %s.", err)

return copy_finish

@ -695,12 +695,12 @@ class V3StorageConnection(driver.HuaweiBase):
share_url_type = self.helper._get_share_url_type(share_proto)
access_type = access['access_type']
if share_proto == 'NFS' and access_type not in ('ip', 'user'):
LOG.warning(_LW('Only IP or USER access types are allowed for '
'NFS shares.'))
LOG.warning('Only IP or USER access types are allowed for '
'NFS shares.')
return
elif share_proto == 'CIFS' and access_type != 'user':
LOG.warning(_LW('Only USER access type is allowed for'
' CIFS shares.'))
LOG.warning('Only USER access type is allowed for'
' CIFS shares.')
return

access_to = access['access_to']
@ -710,14 +710,14 @@ class V3StorageConnection(driver.HuaweiBase):
access_to = '*'
share = self.helper._get_share_by_name(share_name, share_url_type)
if not share:
LOG.warning(_LW('Can not get share %s.'), share_name)
LOG.warning('Can not get share %s.', share_name)
return

access_id = self.helper._get_access_from_share(share['ID'], access_to,
share_proto)
if not access_id:
LOG.warning(_LW('Can not get access id from share. '
'share_name: %s'), share_name)
LOG.warning('Can not get access id from share. '
'share_name: %s', share_name)
return

self.helper._remove_access_from_share(access_id, share_proto)
@ -798,7 +798,7 @@ class V3StorageConnection(driver.HuaweiBase):
share_url_type = self.helper._get_share_url_type(share_proto)
share_stor = self.helper._get_share_by_name(share_name, share_url_type)
if not share_stor:
LOG.warning(_LW('Cannot get share %s.'), share_name)
LOG.warning('Cannot get share %s.', share_name)
return
share_id = share_stor['ID']
all_accesses = self.helper._get_all_access_from_share(share_id,
@ -920,8 +920,8 @@ class V3StorageConnection(driver.HuaweiBase):
opts['thin_provisioning'] = constants.THICK_PROVISIONING

change_opts = self.check_retype_change_opts(opts, poolinfo, fs)
LOG.info(_LI('Retyping share (%(share)s), changed options are : '
'(%(change_opts)s).'),
LOG.info('Retyping share (%(share)s), changed options are : '
'(%(change_opts)s).',
{'share': old_share_name, 'change_opts': change_opts})
try:
self.retype_share(change_opts, fs_id)
@ -1198,9 +1198,9 @@ class V3StorageConnection(driver.HuaweiBase):
if wait_interval:
return int(wait_interval)
else:
LOG.info(_LI(
LOG.info(
"Wait interval is not configured in huawei "
"conf file. Use default: %(default_wait_interval)d."),
"conf file. Use default: %(default_wait_interval)d.",
{"default_wait_interval": constants.DEFAULT_WAIT_INTERVAL})
return constants.DEFAULT_WAIT_INTERVAL

@ -1211,9 +1211,9 @@ class V3StorageConnection(driver.HuaweiBase):
if timeout:
return int(timeout)
else:
LOG.info(_LI(
LOG.info(
"Timeout is not configured in huawei conf file. "
"Use default: %(default_timeout)d."),
"Use default: %(default_timeout)d.",
{"default_timeout": constants.DEFAULT_TIMEOUT})
return constants.DEFAULT_TIMEOUT

@ -1736,8 +1736,8 @@ class V3StorageConnection(driver.HuaweiBase):
remote_fs_id=self.helper.get_fsid_by_name(new_share_name)
)
except Exception:
LOG.exception(_LE('Failed to create a replication pair '
'with host %s.'),
LOG.exception('Failed to create a replication pair '
'with host %s.',
active_replica['host'])
raise

@ -1760,7 +1760,7 @@ class V3StorageConnection(driver.HuaweiBase):
replica_pair_id = self.private_storage.get(replica['share_id'],
'replica_pair_id')
if replica_pair_id is None:
msg = _LE("No replication pair ID recorded for share %s.")
msg = ("No replication pair ID recorded for share %s.")
LOG.error(msg, replica['share_id'])
return common_constants.STATUS_ERROR

@ -1780,7 +1780,7 @@ class V3StorageConnection(driver.HuaweiBase):
try:
self.replica_mgr.switch_over(replica_pair_id)
except Exception:
LOG.exception(_LE('Failed to promote replica %s.'),
LOG.exception('Failed to promote replica %s.',
replica['id'])
raise

@ -1790,8 +1790,8 @@ class V3StorageConnection(driver.HuaweiBase):
try:
self.update_access(replica, access_rules, [], [], share_server)
except Exception:
LOG.warning(_LW('Failed to set access rules to '
'new active replica %s.'),
LOG.warning('Failed to set access rules to '
'new active replica %s.',
replica['id'])
updated_new_active_access = False

@ -1800,8 +1800,8 @@ class V3StorageConnection(driver.HuaweiBase):
try:
self.clear_access(old_active_replica, share_server)
except Exception:
LOG.warning(_LW("Failed to clear access rules from "
"old active replica %s."),
LOG.warning("Failed to clear access rules from "
"old active replica %s.",
old_active_replica['id'])
cleared_old_active_access = False

@ -1833,8 +1833,8 @@ class V3StorageConnection(driver.HuaweiBase):
replica_pair_id = self.private_storage.get(replica['share_id'],
'replica_pair_id')
if replica_pair_id is None:
msg = _LW("No replication pair ID recorded for share %(share)s. "
|
||||
"Continue to delete replica %(replica)s.")
|
||||
msg = ("No replication pair ID recorded for share %(share)s. "
|
||||
"Continue to delete replica %(replica)s.")
|
||||
LOG.warning(msg, {'share': replica['share_id'],
|
||||
'replica': replica['id']})
|
||||
else:
|
||||
@ -1844,6 +1844,6 @@ class V3StorageConnection(driver.HuaweiBase):
|
||||
try:
|
||||
self.delete_share(replica, share_server)
|
||||
except Exception:
|
||||
LOG.exception(_LE('Failed to delete replica %s.'),
|
||||
LOG.exception('Failed to delete replica %s.',
|
||||
replica['id'])
|
||||
raise
|
||||
|
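Note on the hunks above: the change is purely mechanical. The translation-marker call around the format string is dropped, while the format string itself and its lazily-passed arguments are untouched. A minimal before/after sketch (names mirror the hunk; illustrative only):

    # before: marker wraps the format string; args are still passed lazily
    LOG.error(_LE('Failed to mount old share %(name)s. Reason: %(err)s.'),
              {'name': old_share['name'], 'err': err})

    # after: identical call without the marker; oslo.log (stdlib logging
    # underneath) interpolates %(name)s/%(err)s only if the record is emitted
    LOG.error('Failed to mount old share %(name)s. Reason: %(err)s.',
              {'name': old_share['name'], 'err': err})
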
@ -25,7 +25,7 @@ from six.moves import http_cookiejar
from six.moves.urllib import request as urlreq # pylint: disable=E0611

from manila import exception
from manila.i18n import _, _LE, _LW
from manila.i18n import _
from manila.share.drivers.huawei import constants
from manila import utils

@ -77,8 +77,8 @@ class RestHelper(object):
LOG.debug('Response Data: %(res)s.', {'res': res})

except Exception as err:
LOG.error(_LE('\nBad response from server: %(url)s.'
' Error: %(err)s'), {'url': url, 'err': err})
LOG.error('\nBad response from server: %(url)s.'
' Error: %(err)s', {'url': url, 'err': err})
res = '{"error":{"code":%s,' \
'"description":"Connect server error"}}' \
% constants.ERROR_CONNECT_TO_SERVER
@ -110,7 +110,7 @@ class RestHelper(object):
if((result['error']['code'] != 0)
or ("data" not in result)
or (result['data']['deviceid'] is None)):
LOG.error(_LE("Login to %s failed, try another."), item_url)
LOG.error("Login to %s failed, try another.", item_url)
continue

LOG.debug('Login success: %(url)s\n',
@ -139,7 +139,7 @@ class RestHelper(object):
error_code = result['error']['code']
if(error_code == constants.ERROR_CONNECT_TO_SERVER
or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER):
LOG.error(_LE("Can't open the recent url, re-login."))
LOG.error("Can't open the recent url, re-login.")
deviceid = self.login()

if deviceid is not None:
@ -214,7 +214,7 @@ class RestHelper(object):
utils.execute('chmod', '666', filepath, run_as_root=True)

except Exception as err:
LOG.error(_LE('Bad response from change file: %s.') % err)
LOG.error('Bad response from change file: %s.' % err)
raise

def create_share(self, share_name, fs_id, share_proto):
@ -1151,8 +1151,8 @@ class RestHelper(object):
url = "/vlan/" + vlan_id
result = self.call(url, None, 'DELETE')
if result['error']['code'] == constants.ERROR_LOGICAL_PORT_EXIST:
LOG.warning(_LW('Cannot delete vlan because there is '
'a logical port on vlan.'))
LOG.warning('Cannot delete vlan because there is '
'a logical port on vlan.')
return

self._assert_rest_result(result, _('Delete vlan error.'))
@ -1402,7 +1402,7 @@ class RestHelper(object):

if (result['error']['code'] ==
constants.ERROR_REPLICATION_PAIR_NOT_EXIST):
LOG.warning(_LW('Replication pair %s was not found.'),
LOG.warning('Replication pair %s was not found.',
pair_id)
return

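One hunk above ('Bad response from change file: %s.' % err) keeps a pre-existing eager interpolation: only the marker is removed, and the % formatting style is not rewritten. Both forms produce the same message; the lazy variant below (a sketch, not part of this change) defers formatting until the record is actually emitted:

    LOG.error('Bad response from change file: %s.' % err)  # eager: formats unconditionally
    LOG.error('Bad response from change file: %s.', err)   # lazy: formats only when logged
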
@ -18,7 +18,7 @@ from oslo_utils import strutils

from manila.common import constants as common_constants
from manila import exception
from manila.i18n import _, _LE, _LW
from manila.i18n import _
from manila.share.drivers.huawei import constants


@ -56,7 +56,7 @@ class ReplicaPairManager(object):

pair_info = self.helper.create_replication_pair(pair_params)
except Exception:
msg = _LE("Failed to create replication pair for share %s.")
msg = ("Failed to create replication pair for share %s.")
LOG.exception(msg, local_share_name)
raise

@ -69,8 +69,8 @@ class ReplicaPairManager(object):
pair_info = self.helper.get_replication_pair_by_id(
replica_pair_id)
except Exception:
LOG.exception(_LE('Failed to get replication pair info for '
'%s.'), replica_pair_id)
LOG.exception('Failed to get replication pair info for '
'%s.', replica_pair_id)
raise

return pair_info
@ -114,7 +114,7 @@ class ReplicaPairManager(object):
pair_info = self._get_replication_pair_info(replica_pair_id)
except Exception:
# if cannot communicate to backend, return error
LOG.error(_LE('Cannot get replica state, return %s'),
LOG.error('Cannot get replica state, return %s',
common_constants.STATUS_ERROR)
return common_constants.STATUS_ERROR

@ -124,8 +124,8 @@ class ReplicaPairManager(object):
try:
self.helper.sync_replication_pair(pair_id)
except Exception as err:
LOG.warning(_LW('Failed to sync replication pair %(id)s. '
'Reason: %(err)s'),
LOG.warning('Failed to sync replication pair %(id)s. '
'Reason: %(err)s',
{'id': pair_id, 'err': err})

def update_replication_pair_state(self, replica_pair_id):
@ -133,8 +133,8 @@ class ReplicaPairManager(object):

health = self._check_replication_health(pair_info)
if health is not None:
LOG.warning(_LW("Cannot update the replication %s "
"because it's not in normal status."),
LOG.warning("Cannot update the replication %s "
"because it's not in normal status.",
replica_pair_id)
return

@ -145,9 +145,9 @@ class ReplicaPairManager(object):
try:
self.helper.switch_replication_pair(replica_pair_id)
except Exception:
msg = _LE('Replication pair %s primary/secondary '
'relationship is not right, try to switch over '
'again but still failed.')
msg = ('Replication pair %s primary/secondary '
'relationship is not right, try to switch over '
'again but still failed.')
LOG.exception(msg, replica_pair_id)
return

@ -158,8 +158,8 @@ class ReplicaPairManager(object):
try:
self.helper.set_pair_secondary_write_lock(replica_pair_id)
except Exception:
msg = _LE('Replication pair %s secondary access is R/W, '
'try to set write lock but still failed.')
msg = ('Replication pair %s secondary access is R/W, '
'try to set write lock but still failed.')
LOG.exception(msg, replica_pair_id)
return

@ -173,8 +173,8 @@ class ReplicaPairManager(object):
pair_info = self._get_replication_pair_info(replica_pair_id)

if strutils.bool_from_string(pair_info['ISPRIMARY']):
LOG.warning(_LW('The replica to promote is already primary, '
'no need to switch over.'))
LOG.warning('The replica to promote is already primary, '
'no need to switch over.')
return

replica_state = self._check_replica_state(pair_info)
@ -192,14 +192,14 @@ class ReplicaPairManager(object):
# means replication pair is in an abnormal status,
# ignore this exception, continue to cancel secondary write lock,
# let secondary share accessible for disaster recovery.
LOG.exception(_LE('Failed to split replication pair %s while '
'switching over.'), replica_pair_id)
LOG.exception('Failed to split replication pair %s while '
'switching over.', replica_pair_id)

try:
self.helper.cancel_pair_secondary_write_lock(replica_pair_id)
except Exception:
LOG.exception(_LE('Failed to cancel replication pair %s '
'secondary write lock.'), replica_pair_id)
LOG.exception('Failed to cancel replication pair %s '
'secondary write lock.', replica_pair_id)
raise

try:
@ -207,8 +207,8 @@ class ReplicaPairManager(object):
self.helper.set_pair_secondary_write_lock(replica_pair_id)
self.helper.sync_replication_pair(replica_pair_id)
except Exception:
LOG.exception(_LE('Failed to completely switch over '
'replication pair %s.'), replica_pair_id)
LOG.exception('Failed to completely switch over '
'replication pair %s.', replica_pair_id)

# for all the rest steps,
# because secondary share is accessible now,
@ -222,15 +222,15 @@ class ReplicaPairManager(object):
except Exception:
# Ignore this exception because replication pair may at some
# abnormal status that supports deleting.
LOG.warning(_LW('Failed to split replication pair %s '
'before deleting it. Ignore this exception, '
'and try to delete anyway.'),
LOG.warning('Failed to split replication pair %s '
'before deleting it. Ignore this exception, '
'and try to delete anyway.',
replica_pair_id)

try:
self.helper.delete_replication_pair(replica_pair_id)
except Exception:
LOG.exception(_LE('Failed to delete replication pair %s.'),
LOG.exception('Failed to delete replication pair %s.',
replica_pair_id)
raise

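With the marker removed, assignments such as msg = _LE("...") collapse to a plain parenthesized string literal that is then handed to LOG.exception together with its lazy argument. LOG.exception logs at ERROR severity and appends the traceback of the active exception, so the idiom only makes sense inside an except block. A sketch of the resulting shape (names are placeholders):

    try:
        helper.create_replication_pair(pair_params)
    except Exception:
        msg = ("Failed to create replication pair for share %s.")
        LOG.exception(msg, local_share_name)  # ERROR record plus traceback
        raise
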
@ -44,7 +44,7 @@ import six

from manila.common import constants
from manila import exception
from manila.i18n import _, _LI
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.helpers import NFSHelper
from manila.share import share_types
@ -684,8 +684,8 @@ class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
msg = _('Failed to set quota for share %s.') % new_share_name
LOG.exception(msg)
raise exception.GPFSException(msg)
LOG.info(_LI('Existing share %(shr)s has size %(size)s KB '
'which is below 1GiB, so extended it to 1GiB.') %
LOG.info('Existing share %(shr)s has size %(size)s KB '
'which is below 1GiB, so extended it to 1GiB.' %
{'shr': new_share_name, 'size': share_size})
share_size = 1
else:

@ -28,7 +28,7 @@ from oslo_utils import importutils
import six

from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila.share import driver
from manila.share.drivers import generic
from manila.share import utils
@ -105,9 +105,9 @@ class LVMMixin(driver.ExecuteMixin):
share_name), run_as_root=True)
except exception.ProcessExecutionError as exc:
if "not found" not in exc.stderr:
LOG.exception(_LE("Error deleting volume"))
LOG.exception("Error deleting volume")
raise
LOG.warning(_LW("Volume not found: %s") % exc.stderr)
LOG.warning("Volume not found: %s" % exc.stderr)

def _create_snapshot(self, context, snapshot):
"""Creates a snapshot."""
@ -253,12 +253,12 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
if 'device is busy' in six.text_type(exc):
raise exception.ShareBusyException(reason=share['name'])
else:
LOG.info(_LI('Unable to umount: %s'), exc)
LOG.info('Unable to umount: %s', exc)
# remove dir
try:
os.rmdir(mount_path)
except OSError:
LOG.warning(_LW('Unable to delete %s'), mount_path)
LOG.warning('Unable to delete %s', mount_path)

def ensure_share(self, ctx, share, share_server=None):
"""Ensure that storage are mounted and exported."""
@ -273,7 +273,7 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
self._get_helper(share).remove_exports(
self.share_server, share['name'])
except exception.ProcessExecutionError:
LOG.warning(_LW("Can't remove share %r"), share['id'])
LOG.warning("Can't remove share %r", share['id'])
except exception.InvalidShare as exc:
LOG.warning(exc.message)

@ -326,7 +326,7 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
except exception.ProcessExecutionError:
out, err = self._execute('mount', '-l', run_as_root=True)
if device_name in out:
LOG.warning(_LW("%s is already mounted"), device_name)
LOG.warning("%s is already mounted", device_name)
else:
raise
return mount_path

@ -27,7 +27,6 @@ import six
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila import utils

LOG = log.getLogger(__name__)
@ -71,13 +70,13 @@ class BaseDriverUtil(object):
if self._check_error(e):
raise
elif x < len(self.hosts) - 1:
msg = _LE('Error running SSH command. Trying another host')
msg = ('Error running SSH command. Trying another host')
LOG.error(msg)
else:
raise
except Exception as e:
if x < len(self.hosts) - 1:
msg = _LE('Error running SSH command. Trying another host')
msg = ('Error running SSH command. Trying another host')
LOG.error(msg)
else:
raise exception.ProcessExecutionError(six.text_type(e))

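For context on the import churn above: _LE, _LI and _LW were the standard oslo.i18n per-log-level translator shortcuts, while _ (still imported where user-facing strings remain) is the primary translator. manila/i18n.py followed the usual oslo template, roughly as sketched below (a from-memory approximation of that template, not a quote of the file):

    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='manila')

    _ = _translators.primary         # user-facing strings: kept
    _LE = _translators.log_error     # log-level markers: removed by this series
    _LI = _translators.log_info
    _LW = _translators.log_warning
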
@ -26,7 +26,7 @@ from oslo_utils import units

from manila import context
from manila import exception
from manila.i18n import _, _LW, _LI
from manila.i18n import _
from manila.share import api
from manila.share import driver

@ -258,8 +258,8 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""Deletes share storage."""
volume_name = self._get_volume_name(context, share)
if volume_name == "error":
LOG.info(_LI("Skipping deleting share with name %s, as it does not"
" exist on the backend"), share['name'])
LOG.info("Skipping deleting share with name %s, as it does not"
" exist on the backend", share['name'])
return
try:
self._maprfs_util.delete_volume(volume_name)
@ -295,7 +295,7 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
# method shouldn`t raise exception if share does
# not exist actually
if not self._maprfs_util.volume_exists(volume_name):
LOG.warning(_LW('Can not get share %s.'), share['name'])
LOG.warning('Can not get share %s.', share['name'])
return
# check update
if add_rules or delete_rules:
@ -337,10 +337,10 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
raise exception.MapRFSException(msg=msg)

if not self.configuration.maprfs_cldb_ip:
LOG.warning(_LW('CLDB nodes are not specified!'))
LOG.warning('CLDB nodes are not specified!')

if not self.configuration.maprfs_zookeeper_ip:
LOG.warning(_LW('Zookeeper nodes are not specified!'))
LOG.warning('Zookeeper nodes are not specified!')

if not self._check_maprfs_state():
msg = _('MapR-FS is not in healthy state.')
@ -383,7 +383,7 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
location = self._get_share_export_locations(share, path=share_path)
if size == 0:
size = used
msg = _LW(
msg = (
'Share %s has no size quota. Total used value will be'
' used as share size')
LOG.warning(msg, share['name'])

@ -21,7 +21,7 @@ from oslo_log import log
from oslo_utils import importutils

from manila import exception
from manila.i18n import _, _LI
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.netapp import options
from manila.share.drivers.netapp import utils as na_utils
@ -69,7 +69,7 @@ class NetAppDriver(object):
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)

app_version = na_utils.OpenStackInfo().info()
LOG.info(_LI('OpenStack OS Version Info: %s'), app_version)
LOG.info('OpenStack OS Version Info: %s', app_version)
kwargs['app_version'] = app_version

driver_mode = NetAppDriver._get_driver_mode(
@ -107,8 +107,8 @@ class NetAppDriver(object):
storage_family = storage_family.lower()

fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
LOG.info(_LI('Requested unified config: %(storage_family)s and '
'%(driver_mode)s.') % fmt)
LOG.info('Requested unified config: %(storage_family)s and '
'%(driver_mode)s.' % fmt)

family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
if family_meta is None:
@ -124,6 +124,6 @@ class NetAppDriver(object):

kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs)
LOG.info(_LI('NetApp driver of family %(storage_family)s and mode '
'%(driver_mode)s loaded.') % fmt)
LOG.info('NetApp driver of family %(storage_family)s and mode '
'%(driver_mode)s loaded.' % fmt)
return driver

@ -16,7 +16,6 @@
from oslo_log import log
from oslo_utils import excutils

from manila.i18n import _LE
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp import utils as na_utils

@ -89,7 +88,7 @@ class NetAppBaseClient(object):
result = self.send_request('license-v2-list-info')
except netapp_api.NaApiError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Could not get licenses list."))
LOG.exception("Could not get licenses list.")

return sorted(
[l.get_child_content('package').lower()

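Several hunks log from inside excutils.save_and_reraise_exception(). The context manager captures the in-flight exception, runs the body (here just the now-unmarked LOG.exception call), and re-raises the original exception on exit unless the body sets reraise to False, as the Nexenta hunks further below do. A sketch of both uses (the condition is hypothetical):

    from oslo_utils import excutils

    try:
        client.send_request('license-v2-list-info')
    except netapp_api.NaApiError as e:
        with excutils.save_and_reraise_exception() as ctxt:
            LOG.exception("Could not get licenses list.")
            if 'expected' in str(e):   # hypothetical benign-error check
                ctxt.reraise = False   # swallow instead of re-raising
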
@ -26,7 +26,7 @@ from oslo_utils import units
import six

from manila import exception
from manila.i18n import _, _LE, _LW
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_base
from manila.share.drivers.netapp import utils as na_utils
@ -327,7 +327,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
Offlines and destroys root volumes. Deletes Vserver.
"""
if not self.vserver_exists(vserver_name):
LOG.error(_LE("Vserver %s does not exist."), vserver_name)
LOG.error("Vserver %s does not exist.", vserver_name)
return

root_volume_name = self.get_vserver_root_volume_name(vserver_name)
@ -338,7 +338,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
vserver_client.offline_volume(root_volume_name)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EVOLUMEOFFLINE:
LOG.error(_LE("Volume %s is already offline."),
LOG.error("Volume %s is already offline.",
root_volume_name)
else:
raise
@ -367,8 +367,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
vserver_client.send_request('cifs-server-delete', api_args)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EOBJECTNOTFOUND:
LOG.error(_LE('CIFS server does not exist for '
'Vserver %s.'), vserver_name)
LOG.error('CIFS server does not exist for '
'Vserver %s.', vserver_name)
else:
vserver_client.send_request('cifs-server-delete')

@ -1069,7 +1069,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
vserver_aggr_info_list = vserver_aggr_info_element.get_children()

if not vserver_aggr_info_list:
LOG.warning(_LW('No aggregates assigned to Vserver %s.'),
LOG.warning('No aggregates assigned to Vserver %s.',
vserver_name)

# Return dict of key-value pair of aggr_name:aggr_size_available.
@ -1387,7 +1387,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.send_request('net-dns-create', api_args)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EDUPLICATEENTRY:
LOG.error(_LE("DNS exists for Vserver."))
LOG.error("DNS exists for Vserver.")
else:
msg = _("Failed to configure DNS. %s")
raise exception.NetAppException(msg % e.message)
@ -2027,8 +2027,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPIERROR and 'job ID' in e.message:
msg = _LW('Could not unmount volume %(volume)s due to '
'ongoing volume operation: %(exception)s')
msg = ('Could not unmount volume %(volume)s due to '
'ongoing volume operation: %(exception)s')
msg_args = {'volume': volume_name, 'exception': e}
LOG.warning(msg, msg_args)
time.sleep(retry_interval)
@ -2642,7 +2642,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
node_client.send_request('ems-autosupport-log', message_dict)
LOG.debug('EMS executed successfully.')
except netapp_api.NaApiError as e:
LOG.warning(_LW('Failed to invoke EMS. %s') % e)
LOG.warning('Failed to invoke EMS. %s' % e)

@na_utils.trace
def get_aggregate(self, aggregate_name):
@ -3276,8 +3276,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):

has_snapmirrors = len(snapmirrors) > 0
except netapp_api.NaApiError:
msg = _LE("Could not determine if volume %s is part of "
"existing snapmirror relationships.")
msg = ("Could not determine if volume %s is part of "
"existing snapmirror relationships.")
LOG.exception(msg, volume['name'])
has_snapmirrors = False

@ -25,7 +25,7 @@ from oslo_log import log
from oslo_utils import excutils

from manila import exception
from manila.i18n import _, _LE, _LI
from manila.i18n import _
from manila.share import configuration
from manila.share import driver
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
@ -196,7 +196,7 @@ class DataMotionSession(object):
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.info(_LI('No snapmirror relationship to delete'))
LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False

if release:
@ -267,7 +267,7 @@ class DataMotionSession(object):
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason=_LE("Snapmirror relationship is not quiesced."))
reason=("Snapmirror relationship is not quiesced."))

try:
wait_for_quiesced()

@ -33,7 +33,7 @@ import six

from manila.common import constants
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
@ -158,11 +158,11 @@ class NetAppCmodeFileStorageLibrary(object):
'backend': self._backend_name,
'licenses': ', '.join(self._licenses),
}
LOG.info(_LI('Available licenses on %(backend)s '
'are %(licenses)s.'), log_data)
LOG.info('Available licenses on %(backend)s '
'are %(licenses)s.', log_data)

if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
msg = _LE('Neither NFS nor CIFS is licensed on %(backend)s')
msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
msg_args = {'backend': self._backend_name}
LOG.error(msg % msg_args)

@ -657,9 +657,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput,
exception.VserverNotSpecified,
exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for share being "
"deleted: %(share)s. Deletion of share record "
"will proceed anyway. Error: %(error)s"),
LOG.warning("Could not determine share server for share being "
"deleted: %(share)s. Deletion of share record "
"will proceed anyway. Error: %(error)s",
{'share': share['id'], 'error': error})
return

@ -668,7 +668,7 @@ class NetAppCmodeFileStorageLibrary(object):
self._remove_export(share, vserver_client)
self._deallocate_container(share_name, vserver_client)
else:
LOG.info(_LI("Share %s does not exist."), share['id'])
LOG.info("Share %s does not exist.", share['id'])

@na_utils.trace
def _deallocate_container(self, share_name, vserver_client):
@ -812,9 +812,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput,
exception.VserverNotSpecified,
exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for snapshot "
"being deleted: %(snap)s. Deletion of snapshot "
"record will proceed anyway. Error: %(error)s"),
LOG.warning("Could not determine share server for snapshot "
"being deleted: %(snap)s. Deletion of snapshot "
"record will proceed anyway. Error: %(error)s",
{'snap': snapshot['id'], 'error': error})
return

@ -825,7 +825,7 @@ class NetAppCmodeFileStorageLibrary(object):
try:
self._delete_snapshot(vserver_client, share_name, snapshot_name)
except exception.SnapshotResourceNotFound:
msg = _LI("Snapshot %(snap)s does not exist on share %(share)s.")
msg = ("Snapshot %(snap)s does not exist on share %(share)s.")
msg_args = {'snap': snapshot_name, 'share': share_name}
LOG.info(msg, msg_args)

@ -1099,9 +1099,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput,
exception.VserverNotSpecified,
exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for consistency "
"group being deleted: %(cg)s. Deletion of CG "
"record will proceed anyway. Error: %(error)s"),
LOG.warning("Could not determine share server for consistency "
"group being deleted: %(cg)s. Deletion of CG "
"record will proceed anyway. Error: %(error)s",
{'cg': cg_dict['id'], 'error': error})

@na_utils.trace
@ -1128,9 +1128,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput,
exception.VserverNotSpecified,
exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for CG snapshot "
"being deleted: %(snap)s. Deletion of CG snapshot "
"record will proceed anyway. Error: %(error)s"),
LOG.warning("Could not determine share server for CG snapshot "
"being deleted: %(snap)s. Deletion of CG snapshot "
"record will proceed anyway. Error: %(error)s",
{'snap': snap_dict['id'], 'error': error})
return None, None

@ -1143,8 +1143,8 @@ class NetAppCmodeFileStorageLibrary(object):
self._delete_snapshot(
vserver_client, share_name, snapshot_name)
except exception.SnapshotResourceNotFound:
msg = _LI("Snapshot %(snap)s does not exist on share "
"%(share)s.")
msg = ("Snapshot %(snap)s does not exist on share "
"%(share)s.")
msg_args = {'snap': snapshot_name, 'share': share_name}
LOG.info(msg, msg_args)
continue
@ -1185,9 +1185,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput,
exception.VserverNotSpecified,
exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for share "
"%(share)s during access rules update. "
"Error: %(error)s"),
LOG.warning("Could not determine share server for share "
"%(share)s during access rules update. "
"Error: %(error)s",
{'share': share['id'], 'error': error})
return

@ -1216,8 +1216,8 @@ class NetAppCmodeFileStorageLibrary(object):
The self._ssc_stats attribute is updated with the following format.
{<aggregate_name> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'"), self._backend_name)
LOG.info("Updating storage service catalog information for "
"backend '%s'", self._backend_name)

# Work on a copy and update the ssc data atomically before returning.
ssc_stats = copy.deepcopy(self._ssc_stats)
@ -1349,7 +1349,7 @@ class NetAppCmodeFileStorageLibrary(object):
try:
snapmirrors = dm_session.get_snapmirrors(active_replica, replica)
except netapp_api.NaApiError:
LOG.exception(_LE("Could not get snapmirrors for replica %s."),
LOG.exception("Could not get snapmirrors for replica %s.",
replica['id'])
return constants.STATUS_ERROR

@ -1358,8 +1358,8 @@ class NetAppCmodeFileStorageLibrary(object):
try:
dm_session.create_snapmirror(active_replica, replica)
except netapp_api.NaApiError:
LOG.exception(_LE("Could not create snapmirror for "
"replica %s."), replica['id'])
LOG.exception("Could not create snapmirror for "
"replica %s.", replica['id'])
return constants.STATUS_ERROR
return constants.REPLICA_STATE_OUT_OF_SYNC

@ -1381,7 +1381,7 @@ class NetAppCmodeFileStorageLibrary(object):
share_name)
return constants.REPLICA_STATE_OUT_OF_SYNC
except netapp_api.NaApiError:
LOG.exception(_LE("Could not resync snapmirror."))
LOG.exception("Could not resync snapmirror.")
return constants.STATUS_ERROR

last_update_timestamp = float(
@ -1433,8 +1433,8 @@ class NetAppCmodeFileStorageLibrary(object):
context, dm_session, orig_active_replica, replica,
access_rules, share_server=share_server))
except exception.StorageCommunicationException:
LOG.exception(_LE("Could not communicate with the backend "
"for replica %s during promotion."),
LOG.exception("Could not communicate with the backend "
"for replica %s during promotion.",
replica['id'])
new_active_replica = copy.deepcopy(replica)
new_active_replica['replica_state'] = (
@ -1524,16 +1524,16 @@ class NetAppCmodeFileStorageLibrary(object):
replica['status'] = constants.STATUS_ERROR
replica['replica_state'] = constants.STATUS_ERROR
replica['export_locations'] = []
msg = _LE("Failed to change replica (%s) to a SnapMirror "
"destination. Replica backend is unreachable.")
msg = ("Failed to change replica (%s) to a SnapMirror "
"destination. Replica backend is unreachable.")

LOG.exception(msg, replica['id'])
return replica
except netapp_api.NaApiError:
replica['replica_state'] = constants.STATUS_ERROR
replica['export_locations'] = []
msg = _LE("Failed to change replica (%s) to a SnapMirror "
"destination.")
msg = ("Failed to change replica (%s) to a SnapMirror "
"destination.")
LOG.exception(msg, replica['id'])
return replica

@ -1735,8 +1735,8 @@ class NetAppCmodeFileStorageLibrary(object):
share_volume, source_vserver, destination_aggregate)

except Exception:
msg = _LE("Cannot migrate share %(shr)s efficiently between "
"%(src)s and %(dest)s.")
msg = ("Cannot migrate share %(shr)s efficiently between "
"%(src)s and %(dest)s.")
msg_args = {
'shr': source_share['id'],
'src': source_share['host'],
@ -1746,9 +1746,9 @@ class NetAppCmodeFileStorageLibrary(object):
else:
compatible = True
else:
msg = _LW("Cluster credentials have not been configured "
"with this share driver. Cannot perform volume move "
"operations.")
msg = ("Cluster credentials have not been configured "
"with this share driver. Cannot perform volume move "
"operations.")
LOG.warning(msg)

compatibility = {
@ -1774,8 +1774,8 @@ class NetAppCmodeFileStorageLibrary(object):
self._client.start_volume_move(
share_volume, vserver, destination_aggregate)

msg = _LI("Began volume move operation of share %(shr)s from %(src)s "
"to %(dest)s.")
msg = ("Began volume move operation of share %(shr)s from %(src)s "
"to %(dest)s.")
msg_args = {
'shr': source_share['id'],
'src': source_share['host'],
@ -1826,8 +1826,8 @@ class NetAppCmodeFileStorageLibrary(object):
'cutover_soft_deferred'):
status['percent-complete'] = 100

msg = _LI("Volume move status for share %(share)s: (State) %(state)s. "
"(Phase) %(phase)s. Details: %(details)s")
msg = ("Volume move status for share %(share)s: (State) %(state)s. "
"(Phase) %(phase)s. Details: %(details)s")
msg_args = {
'state': status['state'],
'details': status['details'],
@ -1854,13 +1854,13 @@ class NetAppCmodeFileStorageLibrary(object):
try:
self._get_volume_move_status(source_share, share_server)
except exception.NetAppException:
LOG.exception(_LE("Could not get volume move status."))
LOG.exception("Could not get volume move status.")
return

self._client.abort_volume_move(share_volume, vserver)

msg = _LI("Share volume move operation for share %(shr)s from host "
"%(src)s to %(dest)s was successfully aborted.")
msg = ("Share volume move operation for share %(shr)s from host "
"%(src)s to %(dest)s was successfully aborted.")
msg_args = {
'shr': source_share['id'],
'src': source_share['host'],
@ -1903,9 +1903,9 @@ class NetAppCmodeFileStorageLibrary(object):
destination_share['id'])
vserver_client.set_volume_name(share_volume, new_share_volume_name)

msg = _LI("Volume move operation for share %(shr)s has completed "
"successfully. Share has been moved from %(src)s to "
"%(dest)s.")
msg = ("Volume move operation for share %(shr)s has completed "
"successfully. Share has been moved from %(src)s to "
"%(dest)s.")
msg_args = {
'shr': source_share['id'],
'src': source_share['host'],

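A small Python detail that keeps these multi-line messages intact: once _LI(...)/_LE(...) is gone, what remains is a parenthesized sequence of adjacent string literals, which the compiler concatenates into one string, so the wrapped layout needs no '+'. For example (placeholder values):

    msg = ("Began volume move operation of share %(shr)s from %(src)s "
           "to %(dest)s.")
    # equivalent to one literal:
    # "Began volume move operation of share %(shr)s from %(src)s to %(dest)s."
    LOG.info(msg, msg_args)  # msg_args supplies shr/src/dest lazily
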
@ -26,7 +26,7 @@ from oslo_log import log
from oslo_utils import excutils

from manila import exception
from manila.i18n import _, _LE, _LW, _LI
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
@ -47,8 +47,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(

if self._have_cluster_creds:
if self.configuration.netapp_vserver:
msg = _LW('Vserver is specified in the configuration. This is '
'ignored when the driver is managing share servers.')
msg = ('Vserver is specified in the configuration. This is '
'ignored when the driver is managing share servers.')
LOG.warning(msg)

else: # only have vserver creds, which is an error in multi_svm mode
@ -191,7 +191,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
vserver_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to configure Vserver."))
LOG.error("Failed to configure Vserver.")
self._delete_vserver(vserver_name,
security_services=security_services)

@ -243,7 +243,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(

network_allocations = network_info.get('admin_network_allocations')
if not network_allocations:
LOG.info(_LI('No admin network defined for Vserver %s.') %
LOG.info('No admin network defined for Vserver %s.' %
vserver_name)
return

@ -310,15 +310,15 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'vserver_name') if server_details else None

if not vserver:
LOG.warning(_LW("Vserver not specified for share server being "
"deleted. Deletion of share server record will "
"proceed anyway."))
LOG.warning("Vserver not specified for share server being "
"deleted. Deletion of share server record will "
"proceed anyway.")
return

elif not self._client.vserver_exists(vserver):
LOG.warning(_LW("Could not find Vserver for share server being "
"deleted: %s. Deletion of share server "
"record will proceed anyway."), vserver)
LOG.warning("Could not find Vserver for share server being "
"deleted: %s. Deletion of share server "
"record will proceed anyway.", vserver)
return

self._delete_vserver(vserver, security_services=security_services)
@ -362,4 +362,4 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
node = interface['home-node']
self._client.delete_vlan(node, port, vlan)
except exception.NetAppException:
LOG.exception(_LE("Deleting Vserver VLAN failed."))
LOG.exception("Deleting Vserver VLAN failed.")

@ -25,7 +25,7 @@ import re
from oslo_log import log

from manila import exception
from manila.i18n import _, _LI
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils

@ -72,8 +72,8 @@ class NetAppCmodeSingleSVMFileStorageLibrary(
'correctly.') % self._vserver
raise exception.NetAppException(msg)

msg = _LI('Using Vserver %(vserver)s for backend %(backend)s with '
'%(creds)s credentials.')
msg = ('Using Vserver %(vserver)s for backend %(backend)s with '
'%(creds)s credentials.')
msg_args = {'vserver': self._vserver, 'backend': self._backend_name}
msg_args['creds'] = ('cluster' if self._have_cluster_creds
else 'Vserver')

@ -21,7 +21,7 @@ import copy
from oslo_log import log as logging

from manila import exception
from manila.i18n import _, _LE
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api


@ -60,9 +60,9 @@ class PerformanceLibrary(object):
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
else:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
LOG.exception(_LE('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.'))
LOG.exception('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.')

def update_performance_cache(self, flexvol_pools, aggregate_pools):
"""Called periodically to update per-pool node utilization metrics."""
@ -194,8 +194,8 @@ class PerformanceLibrary(object):
return max(min(100.0, node_utilization), 0)

except Exception:
LOG.exception(_LE('Could not calculate node utilization for '
'node %s.'), node_name)
LOG.exception('Could not calculate node utilization for '
'node %s.', node_name)
return DEFAULT_UTILIZATION

def _get_kahuna_utilization(self, counters_t1, counters_t2):
@ -343,8 +343,8 @@ class PerformanceLibrary(object):
self._get_node_utilization_wafl_counters(node_name) +
self._get_node_utilization_processor_counters(node_name))
except netapp_api.NaApiError:
LOG.exception(_LE('Could not get utilization counters from node '
'%s'), node_name)
LOG.exception('Could not get utilization counters from node '
'%s', node_name)
return None

def _get_node_utilization_system_counters(self, node_name):

@ -23,7 +23,6 @@ import six

from manila.common import constants
from manila import exception
from manila.i18n import _, _LI
from manila.share.drivers.netapp.dataontap.protocols import base
from manila.share.drivers.netapp import utils as na_utils

@ -89,15 +88,15 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
self._is_readonly(new_rules[address]))

# Rename policy currently in force
LOG.info(_LI('Renaming NFS export policy for share %(share)s to '
'%(policy)s.') %
LOG.info('Renaming NFS export policy for share %(share)s to '
'%(policy)s.' %
{'share': share_name, 'policy': temp_old_export_policy_name})
self._client.rename_nfs_export_policy(export_policy_name,
temp_old_export_policy_name)

# Switch share to the new policy
LOG.info(_LI('Setting NFS export policy for share %(share)s to '
'%(policy)s.') %
LOG.info('Setting NFS export policy for share %(share)s to '
'%(policy)s.' %
{'share': share_name, 'policy': temp_new_export_policy_name})
self._client.set_nfs_export_policy_for_volume(
share_name, temp_new_export_policy_name)
@ -106,8 +105,8 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
self._client.soft_delete_nfs_export_policy(temp_old_export_policy_name)

# Rename new policy to its final name
LOG.info(_LI('Renaming NFS export policy for share %(share)s to '
'%(policy)s.') %
LOG.info('Renaming NFS export policy for share %(share)s to '
'%(policy)s.' %
{'share': share_name, 'policy': export_policy_name})
self._client.rename_nfs_export_policy(temp_new_export_policy_name,
export_policy_name)
@ -117,8 +116,8 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
"""Checks whether access rule type and level are valid."""

if rule['access_type'] != 'ip':
msg = _("Clustered Data ONTAP supports only 'ip' type for share "
"access rules with NFS protocol.")
msg = ("Clustered Data ONTAP supports only 'ip' type for share "
"access rules with NFS protocol.")
raise exception.InvalidShareAccess(reason=msg)

if rule['access_level'] not in constants.ACCESS_LEVELS:

@ -24,7 +24,7 @@ from oslo_log import log
import six

from manila import exception
from manila.i18n import _, _LI, _LW
from manila.i18n import _
from manila import version


@ -43,9 +43,9 @@ def validate_driver_instantiation(**kwargs):
"""
if kwargs and kwargs.get('netapp_mode') == 'proxy':
return
LOG.warning(_LW('Please use NetAppDriver in the configuration file '
'to load the driver instead of directly specifying '
'the driver module name.'))
LOG.warning('Please use NetAppDriver in the configuration file '
'to load the driver instead of directly specifying '
'the driver module name.')


def check_flags(required_flags, configuration):
@ -74,7 +74,7 @@ def setup_tracing(trace_flags_string):
flags = trace_flags_string.split(',')
flags = [flag.strip() for flag in flags]
for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)):
LOG.warning(_LW('Invalid trace flag: %s') % invalid_flag)
LOG.warning('Invalid trace flag: %s' % invalid_flag)
TRACE_METHOD = 'method' in flags
TRACE_API = 'api' in flags

@ -164,7 +164,7 @@ class OpenStackInfo(object):
"'%{version}\t%{release}\t%{vendor}'",
self.PACKAGE_NAME)
if not out:
LOG.info(_LI('No rpm info found for %(pkg)s package.') % {
LOG.info('No rpm info found for %(pkg)s package.' % {
'pkg': self.PACKAGE_NAME})
return False
parts = out.split()
@ -173,7 +173,7 @@ class OpenStackInfo(object):
self._vendor = ' '.join(parts[2::])
return True
except Exception as e:
LOG.info(_LI('Could not run rpm command: %(msg)s.') % {
LOG.info('Could not run rpm command: %(msg)s.' % {
'msg': e})
return False

@ -185,9 +185,9 @@ class OpenStackInfo(object):
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
self.PACKAGE_NAME)
if not out:
LOG.info(_LI(
'No dpkg-query info found for %(pkg)s package.') % {
'pkg': self.PACKAGE_NAME})
LOG.info(
'No dpkg-query info found for %(pkg)s package.' % {
'pkg': self.PACKAGE_NAME})
return False
# Debian format: [epoch:]upstream_version[-debian_revision]
deb_version = out
@ -204,7 +204,7 @@ class OpenStackInfo(object):
self._vendor = _vendor
return True
except Exception as e:
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % {
LOG.info('Could not run dpkg-query command: %(msg)s.' % {
'msg': e})
return False

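Marker calls that wrap an eagerly %-formatted string, as in several hunks above, are easy to miss by eye. A small stand-alone helper for auditing a tree for leftovers (an illustrative sketch, not part of the commit):

    import re
    import sys

    MARKER = re.compile(r'\b_L[EIW]\s*\(')

    def find_markers(path):
        """Yield (line_no, line) pairs still referencing _LE/_LI/_LW."""
        with open(path, encoding='utf-8') as source:
            for no, line in enumerate(source, start=1):
                if MARKER.search(line):
                    yield no, line.rstrip()

    if __name__ == '__main__':
        for filename in sys.argv[1:]:
            for no, line in find_markers(filename):
                print('%s:%d: %s' % (filename, no, line))
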
@ -16,7 +16,7 @@
from oslo_log import log

from manila import exception
from manila.i18n import _, _LI
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.nexenta.ns4 import nexenta_nfs_helper
from manila.share.drivers.nexenta import options
@ -98,7 +98,7 @@ class NexentaNasDriver(driver.ShareDriver):
LOG.debug('Creating a snapshot of share %s.', snapshot['share_name'])
snap_id = self.helper.create_snapshot(
snapshot['share_name'], snapshot['name'])
LOG.info(_LI('Created snapshot %s.'), snap_id)
LOG.info('Created snapshot %s.', snap_id)

def delete_snapshot(self, context, snapshot, share_server=None):
"""Delete a snapshot."""

@ -18,7 +18,7 @@ from oslo_utils import excutils

from manila.common import constants as common
from manila import exception
from manila.i18n import _, _LI
from manila.i18n import _
from manila.share.drivers.nexenta.ns4 import jsonrpc
from manila.share.drivers.nexenta import utils

@ -112,8 +112,8 @@ class NFSHelper(object):
except exception.NexentaException as e:
with excutils.save_and_reraise_exception() as exc:
if NOT_EXIST in e.args[0]:
LOG.info(_LI('Folder %s does not exist, it was '
'already deleted.'), folder)
LOG.info('Folder %s does not exist, it was '
'already deleted.', folder)
exc.reraise = False

def _get_share_path(self, share_name):
@ -137,20 +137,20 @@ class NFSHelper(object):
except exception.NexentaException as e:
with excutils.save_and_reraise_exception() as exc:
if NOT_EXIST in e.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not '
'exist, it was already deleted.'),
LOG.info('Snapshot %(folder)s@%(snapshot)s does not '
'exist, it was already deleted.',
{
'folder': share_name,
'snapshot': snapshot_name,
})
})
exc.reraise = False
elif DEP_CLONES in e.args[0]:
LOG.info(_LI(
LOG.info(
'Snapshot %(folder)s@%(snapshot)s has dependent '
'clones, it will be deleted later.'), {
'clones, it will be deleted later.', {
'folder': share_name,
'snapshot': snapshot_name
})
})
exc.reraise = False

def create_share_from_snapshot(self, share, snapshot):

@ -18,7 +18,7 @@ from oslo_utils import units

from manila.common import constants as common
from manila import exception
from manila.i18n import _, _LW, _LE
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.nexenta.ns5 import jsonrpc
from manila.share.drivers.nexenta import options
@ -156,9 +156,9 @@ class NexentaNasDriver(driver.ShareDriver):
try:
self.delete_share(None, share)
except exception.NexentaException as exc:
LOG.warning(_LW(
LOG.warning(
"Cannot destroy created filesystem: %(vol)s/%(folder)s, "
"exception: %(exc)s"),
"exception: %(exc)s",
{'vol': self.pool_name, 'folder': '/'.join(
(self.fs_prefix, share['name'])), 'exc': exc})
raise
@ -194,12 +194,12 @@ class NexentaNasDriver(driver.ShareDriver):
self._add_permission(share['name'])
except exception.NexentaException:
LOG.exception(
_LE('Failed to add permissions for %s'), share['name'])
('Failed to add permissions for %s'), share['name'])
try:
self.delete_share(None, share)
except exception.NexentaException:
LOG.warning(_LW("Cannot destroy cloned filesystem: "
"%(vol)s/%(filesystem)s"),
LOG.warning("Cannot destroy cloned filesystem: "
"%(vol)s/%(filesystem)s",
{'vol': self.pool_name,
'filesystem': '/'.join(
(self.fs_prefix, share['name']))})
@ -269,7 +269,7 @@ class NexentaNasDriver(driver.ShareDriver):
except exception.NexentaException as e:
if e.kwargs['code'] == 'ENOENT':
LOG.warning(
_LW('snapshot %(name)s not found, response: %(msg)s'), {
'snapshot %(name)s not found, response: %(msg)s', {
'name': snapshot['name'], 'msg': e.msg})
else:
raise

@ -25,8 +25,8 @@ from oslo_utils import units

from manila.common import constants
from manila import exception
from manila.i18n import _
from manila import share
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers.qnap import api
from manila import utils
@ -83,9 +83,9 @@ class QnapShareDriver(driver.ShareDriver):
try:
self.api_executor = self._create_api_executor()
except Exception:
LOG.exception(_LE('Failed to create HTTP client. Check IP '
'address, port, username, password and make '
'sure the array version is compatible.'))
LOG.exception('Failed to create HTTP client. Check IP '
'address, port, username, password and make '
'sure the array version is compatible.')
raise

def check_for_setup_error(self):
@ -301,7 +301,7 @@ class QnapShareDriver(driver.ShareDriver):
# Use private_storage to retreive volume ID created in the NAS.
volID = self.private_storage.get(share['id'], 'volID')
if not volID:
LOG.warning(_LW('volID for Share %s does not exist'), share['id'])
LOG.warning('volID for Share %s does not exist', share['id'])
return
LOG.debug('volID: %s', volID)

@ -309,7 +309,7 @@ class QnapShareDriver(driver.ShareDriver):
self.configuration.qnap_poolname,
vol_no=volID)
if del_share is None:
LOG.warning(_LW('Share %s does not exist'), share['id'])
LOG.warning('Share %s does not exist', share['id'])
return

vol_no = del_share.find('vol_no').text
@ -350,7 +350,7 @@ class QnapShareDriver(driver.ShareDriver):
volID = self.private_storage.get(snapshot['share']['id'], 'volID')
if not volID:
LOG.warning(
_LW('volID for Share %s does not exist'),
'volID for Share %s does not exist',
snapshot['share']['id'])
raise exception.ShareResourceNotFound(
share_id=snapshot['share']['id'])
@ -401,7 +401,7 @@ class QnapShareDriver(driver.ShareDriver):
snapshot_id = (snapshot.get('provider_location') or
self.private_storage.get(snapshot['id'], 'snapshot_id'))
if not snapshot_id:
LOG.warning(_LW('Snapshot %s does not exist'), snapshot['id'])
LOG.warning('Snapshot %s does not exist', snapshot['id'])
return
LOG.debug('snapshot_id: %s', snapshot_id)

@ -421,7 +421,7 @@ class QnapShareDriver(driver.ShareDriver):
snapshot_id = (snapshot.get('provider_location') or
self.private_storage.get(snapshot['id'], 'snapshot_id'))
if not snapshot_id:
LOG.warning(_LW('Snapshot %s does not exist'), snapshot['id'])
LOG.warning('Snapshot %s does not exist', snapshot['id'])
raise exception.SnapshotResourceNotFound(name=snapshot['id'])
LOG.debug('snapshot_id: %s', snapshot_id)

@ -568,7 +568,7 @@ class QnapShareDriver(driver.ShareDriver):
try:
self._check_share_access(share_proto, access_type)
except exception.InvalidShareAccess:
LOG.warning(_LW('The denied rule is invalid and does not exist.'))
LOG.warning('The denied rule is invalid and does not exist.')
return

hostlist = self.api_executor.get_host_list()
@ -603,8 +603,8 @@ class QnapShareDriver(driver.ShareDriver):
"""Manages a share that exists on backend."""
if share['share_proto'].lower() == 'nfs':
# 10.0.0.1:/share/example
LOG.info(_LI("Share %(shr_path)s will be managed with ID "
"%(shr_id)s."),
LOG.info("Share %(shr_path)s will be managed with ID"
"%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

@ -646,8 +646,8 @@ class QnapShareDriver(driver.ShareDriver):
volName = self.private_storage.get(share['id'], 'volName')
LOG.debug('volName: %s', volName)

LOG.info(_LI("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s."),
LOG.info("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']})

@ -28,7 +28,6 @@ import six
import six.moves.urllib.parse as urlparse

from manila import exception
from manila.i18n import _LW
from manila import utils

LOG = log.getLogger(__name__)

@ -48,9 +47,9 @@ class JsonRpc(object):
if self._url_scheme == 'https':
if not self._ca_file:
self._ca_file = False
LOG.warning(_LW(
LOG.warning(
"Will not verify the server certificate of the API service"
" because the CA certificate is not available."))
" because the CA certificate is not available.")
self._id = 0
self._credentials = auth.HTTPBasicAuth(
user_credentials[0], user_credentials[1])
@ -29,7 +29,7 @@ import six

from manila.common import constants
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.quobyte import jsonrpc

@ -121,7 +121,7 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
try:
self.rpc.call('getInformation', {})
except Exception as exc:
LOG.error(_LE("Could not connect to API: %s"), exc)
LOG.error("Could not connect to API: %s", exc)
raise exception.QBException(
_('Could not connect to API: %s') % exc)

@ -143,8 +143,8 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):

total = float(result['total_physical_capacity'])
used = float(result['total_physical_usage'])
LOG.info(_LI('Read capacity of %(cap)s bytes and '
'usage of %(use)s bytes from backend. '),
LOG.info('Read capacity of %(cap)s bytes and '
'usage of %(use)s bytes from backend. ',
{'cap': total, 'use': used})
free = total - used
if free < 0:

@ -244,8 +244,8 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
share['name'],
self._get_project_name(context, share['project_id']))
if not volume_uuid:
LOG.warning(_LW("No volume found for "
"share %(project_id)s/%(name)s")
LOG.warning("No volume found for "
"share %(project_id)s/%(name)s"
% {"project_id": share['project_id'],
"name": share['name']})
return

@ -374,7 +374,7 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
self._allow_access(context, share, a_rule)
else:
if not access_rules:
LOG.warning(_LW("No access rules provided in update_access."))
LOG.warning("No access rules provided in update_access.")
else:
# Handling access rule recovery
existing_rules = self._fetch_existing_access(context, share)
@ -32,7 +32,7 @@ from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _, _LW
from manila.i18n import _
from manila.network.linux import ip_lib
from manila.network.neutron import api as neutron
from manila import utils

@ -327,8 +327,8 @@ class ServiceInstanceManager(object):
name = name or self.get_config_option(
"service_instance_security_group")
if not name:
LOG.warning(_LW("Name for service instance security group is not "
"provided. Skipping security group step."))
LOG.warning("Name for service instance security group is not "
"provided. Skipping security group step.")
return None
s_groups = [s for s in self.compute_api.security_group_list(context)
if s.name == name]

@ -359,15 +359,15 @@ class ServiceInstanceManager(object):
def ensure_service_instance(self, context, server):
"""Ensures that server exists and active."""
if 'instance_id' not in server:
LOG.warning(_LW("Unable to check server existence since "
"'instance_id' key is not set in share server "
"backend details."))
LOG.warning("Unable to check server existence since "
"'instance_id' key is not set in share server "
"backend details.")
return False
try:
inst = self.compute_api.server_get(self.admin_context,
server['instance_id'])
except exception.InstanceNotFound:
LOG.warning(_LW("Service instance %s does not exist."),
LOG.warning("Service instance %s does not exist.",
server['instance_id'])
return False
if inst['status'] == 'ACTIVE':

@ -510,11 +510,11 @@ class ServiceInstanceManager(object):
raise exception.ServiceInstanceException(
_('Neither service instance password nor key are available.'))
if not key_path:
LOG.warning(_LW(
LOG.warning(
'No key path is available. May be non-existent key path is '
'provided. Check path_to_private_key (current value '
'%(private_path)s) and path_to_public_key (current value '
'%(public_path)s) in manila configuration file.'), dict(
'%(public_path)s) in manila configuration file.', dict(
private_path=self.path_to_private_key,
public_path=self.path_to_public_key))
network_data = self.network_helper.setup_network(network_info)

@ -965,8 +965,8 @@ class NeutronNetworkHelper(BaseNetworkhelper):
addr_list = device.addr.list()
except Exception as e:
if 'does not exist' in six.text_type(e):
LOG.warning(_LW(
"Device %s does not exist anymore.") % device.name)
LOG.warning(
"Device %s does not exist anymore." % device.name)
else:
raise
for addr in addr_list:
@ -23,11 +23,11 @@ import six
from oslo_config import cfg
from oslo_log import log

from manila import utils
from manila.i18n import _, _LI, _LW
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share import utils as share_utils
from manila import utils

tegile_opts = [
cfg.StrOpt('tegile_nas_server',

@ -201,7 +201,7 @@ class TegileShareDriver(driver.ShareDriver):
# of 'sharename' if inherited share properties are selected.
ip, real_share_name = self._api('createShare', params).split()

LOG.info(_LI("Created share %(sharename)s, share id %(shid)s."),
LOG.info("Created share %(sharename)s, share id %(shid)s.",
{'sharename': share_name, 'shid': share['id']})

return self._get_location_path(real_share_name, share_proto, ip)

@ -273,8 +273,8 @@ class TegileShareDriver(driver.ShareDriver):

params = (share, snap_name, False)

LOG.info(_LI('Creating snapshot for share_name=%(shr)s'
' snap_name=%(name)s'),
LOG.info('Creating snapshot for share_name=%(shr)s'
' snap_name=%(name)s',
{'shr': share_name, 'name': snap_name})

self._api('createShareSnapshot', params)

@ -383,18 +383,18 @@ class TegileShareDriver(driver.ShareDriver):

def _check_share_access(self, share_proto, access_type):
if share_proto == 'CIFS' and access_type != 'user':
reason = _LW('Only USER access type is allowed for '
'CIFS shares.')
reason = ('Only USER access type is allowed for '
'CIFS shares.')
LOG.warning(reason)
raise exception.InvalidShareAccess(reason=reason)
elif share_proto == 'NFS' and access_type not in ('ip', 'user'):
reason = _LW('Only IP or USER access types are allowed for '
'NFS shares.')
reason = ('Only IP or USER access types are allowed for '
'NFS shares.')
LOG.warning(reason)
raise exception.InvalidShareAccess(reason=reason)
elif share_proto not in ('NFS', 'CIFS'):
reason = _LW('Unsupported protocol \"%s\" specified for '
'access rule.') % share_proto
reason = ('Unsupported protocol \"%s\" specified for '
'access rule.') % share_proto
raise exception.InvalidShareAccess(reason=reason)

@debugger
@ -21,7 +21,7 @@ from oslo_config import cfg
from oslo_log import log

from manila import exception
from manila.i18n import _, _LI, _LW
from manila.i18n import _
from manila.share.drivers import service_instance
from manila.share.drivers.windows import windows_utils
from manila.share.drivers.windows import winrm_helper

@ -232,21 +232,21 @@ class WindowsServiceInstanceManager(service_instance.ServiceInstanceManager):

def get_valid_security_service(self, security_services):
if not security_services:
LOG.info(_LI("No security services provided."))
LOG.info("No security services provided.")
elif len(security_services) > 1:
LOG.warning(_LW("Multiple security services provided. Only one "
"security service of type 'active_directory' "
"is supported."))
LOG.warning("Multiple security services provided. Only one "
"security service of type 'active_directory' "
"is supported.")
else:
security_service = security_services[0]
security_service_type = security_service['type']
if security_service_type == 'active_directory':
return security_service
else:
LOG.warning(_LW("Only security services of type "
"'active_directory' are supported. "
"Retrieved security "
"service type: %(sec_type)s."),
LOG.warning("Only security services of type "
"'active_directory' are supported. "
"Retrieved security "
"service type: %(sec_type)s.",
{'sec_type': security_service_type})
return None
@ -18,7 +18,6 @@ import os
from oslo_log import log
from oslo_utils import units

from manila.i18n import _LW
from manila.share import driver as base_driver
from manila.share.drivers import generic
from manila.share.drivers.windows import service_instance

@ -67,9 +66,9 @@ class WindowsSMBDriver(generic.GenericShareDriver):
security_service['user'],
security_service['password'])
except Exception as exc:
LOG.warning(_LW("Failed to remove service instance "
"%(instance_id)s from domain %(domain)s. "
"Exception: %(exc)s."),
LOG.warning("Failed to remove service instance "
"%(instance_id)s from domain %(domain)s. "
"Exception: %(exc)s.",
dict(instance_id=server_details['instance_id'],
domain=security_service['domain'],
exc=exc))
@ -20,7 +20,6 @@ from oslo_log import log

from manila.common import constants
from manila import exception
from manila.i18n import _LI, _LW
from manila.share.drivers import helpers
from manila.share.drivers.windows import windows_utils

@ -78,7 +77,7 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
'-ReadAccess', "*%s" % self._NULL_SID]
self._remote_exec(server, cmd)
else:
LOG.info(_LI("Skipping creating export %s as it already exists."),
LOG.info("Skipping creating export %s as it already exists.",
share_name)
return self.get_exports_for_share(server, export_location)

@ -127,20 +126,20 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
share_name)
else:
LOG.warning(
_LW("Found explicit deny ACE rule that was not "
"created by Manila and will be ignored: %s"),
"Found explicit deny ACE rule that was not "
"created by Manila and will be ignored: %s",
raw_acl)
continue
if access_level == self._ACCESS_LEVEL_CUSTOM:
LOG.warning(
_LW("Found 'custom' ACE rule that will be ignored: %s"),
"Found 'custom' ACE rule that will be ignored: %s",
raw_acl)
continue
elif access_right == self._WIN_ACCESS_RIGHT_FULL:
LOG.warning(
_LW("Account '%(access_to)s' was given full access "
"right on share %(share_name)s. Manila only "
"grants 'change' access."),
"Account '%(access_to)s' was given full access "
"right on share %(share_name)s. Manila only "
"grants 'change' access.",
{'access_to': access_to,
'share_name': share_name})

@ -159,8 +158,8 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
"-AccountName", "'%s'" % access_to, "-Force"]
self._remote_exec(server, cmd)
self._refresh_acl(server, share_name)
LOG.info(_LI("Granted %(access_level)s access to '%(access_to)s' "
"on share %(share_name)s"),
LOG.info("Granted %(access_level)s access to '%(access_to)s' "
"on share %(share_name)s",
{'access_level': access_level,
'access_to': access_to,
'share_name': share_name})

@ -174,8 +173,8 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
'-AccountName', '"%s"' % access_to, '-Force']
self._remote_exec(server, cmd)
self._refresh_acl(server, share_name)
LOG.info(_LI("Revoked access to '%(access_to)s' "
"on share %(share_name)s"),
LOG.info("Revoked access to '%(access_to)s' "
"on share %(share_name)s",
{'access_to': access_to,
'share_name': share_name})

@ -207,12 +206,12 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel):
# This check will allow invalid rules to be deleted.
LOG.warning(_LW(
LOG.warning(
"Unsupported access level %(level)s or access type "
"%(type)s, skipping removal of access rule to "
"%(to)s.") % {'level': deleted_rule['access_level'],
'type': deleted_rule['access_type'],
'to': deleted_rule['access_to']})
"%(to)s." % {'level': deleted_rule['access_level'],
'type': deleted_rule['access_type'],
'to': deleted_rule['access_to']})
continue
self._revoke_share_access(server, share_name,
deleted_rule['access_to'])
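One detail in the -207 hunk above: that message is interpolated eagerly with % before the call, so only the _LW( wrapper is removed. Most other call sites in this commit keep the lazy style, where the logger formats the message only if the record is actually emitted. A short sketch of the two styles (illustrative; LOG is an oslo.log logger):

# Lazy: formatting happens inside the logging layer, and only when
# the warning is actually emitted.
LOG.warning("Unsupported access level %(level)s.", {'level': level})

# Eager: the string is built up front with %, as in the hunk above.
LOG.warning("Unsupported access level %(level)s." % {'level': level})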
@ -17,7 +17,6 @@ import re

from oslo_log import log

from manila.i18n import _LI

LOG = log.getLogger(__name__)

@ -125,9 +124,9 @@ class WindowsUtils(object):
# NOTE(lpetrut): An instance reboot is needed but this will be
# performed using Nova so that the instance state can be
# retrieved easier.
LOG.info(_LI("Joining server %(ip)s to Active Directory "
"domain %(domain)s"), dict(ip=server['ip'],
domain=domain))
LOG.info("Joining server %(ip)s to Active Directory "
"domain %(domain)s", dict(ip=server['ip'],
domain=domain))
cmds = [
('$password = "%s" | '
'ConvertTo-SecureString -asPlainText -Force' % admin_password),
@ -30,7 +30,7 @@ from oslo_utils import timeutils

from manila.common import constants
from manila import exception
from manila.i18n import _, _LI, _LW
from manila.i18n import _
from manila.share import configuration
from manila.share import driver
from manila.share.drivers.zfsonlinux import utils as zfs_utils

@ -279,7 +279,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
self.zfs('destroy', '-f', name)
return
except exception.ProcessExecutionError:
LOG.info(_LI("Failed to destroy ZFS dataset, retrying one time"))
LOG.info("Failed to destroy ZFS dataset, retrying one time")

# NOTE(bswartz): There appears to be a bug in ZFS when creating and
# destroying datasets concurrently where the filesystem remains mounted

@ -529,8 +529,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
break
else:
LOG.warning(
_LW("Share with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted."),
"Share with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted.",
{'id': share['id'], 'name': dataset_name})
self.private_storage.delete(share['id'])

@ -574,8 +574,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
break
else:
LOG.warning(
_LW("Snapshot with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted."),
"Snapshot with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted.",
{'id': snapshot['id'], 'name': snapshot_name})

@ensure_share_server_not_provided

@ -972,8 +972,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
break
else:
LOG.warning(
_LW("Share replica with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted."),
"Share replica with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted.",
{'id': replica['id'], 'name': dataset_name})
self.private_storage.delete(replica['id'])

@ -1131,7 +1131,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
'sudo', 'zfs', 'receive', '-vF', dataset_name,
)
except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"),
LOG.warning("Failed to sync replica %(id)s. %(e)s",
{'id': repl['id'], 'e': e})
replica_dict[repl['id']]['replica_state'] = (
constants.REPLICA_STATE_OUT_OF_SYNC)

@ -1153,7 +1153,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
constants.REPLICA_STATE_IN_SYNC)
except Exception as e:
LOG.warning(
_LW("Failed to update currently active replica. \n%s"), e)
"Failed to update currently active replica. \n%s", e)

replica_dict[active_replica['id']]['replica_state'] = (
constants.REPLICA_STATE_OUT_OF_SYNC)

@ -1185,7 +1185,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
'sudo', 'zfs', 'receive', '-vF', dataset_name,
)
except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"),
LOG.warning("Failed to sync replica %(id)s. %(e)s",
{'id': repl['id'], 'e': e})
replica_dict[repl['id']]['replica_state'] = (
constants.REPLICA_STATE_OUT_OF_SYNC)

@ -1274,7 +1274,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
)
except exception.ProcessExecutionError as e:
LOG.warning(
_LW("Failed to sync snapshot instance %(id)s. %(e)s"),
"Failed to sync snapshot instance %(id)s. %(e)s",
{'id': replica_snapshot['id'], 'e': e})
replica_snapshots_dict[replica_snapshot['id']]['status'] = (
constants.STATUS_ERROR)

@ -1526,8 +1526,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
x for x in line.strip().split(' ') if x != ''][1]
self.execute('sudo', 'kill', '-9', migr_pid)
except exception.ProcessExecutionError as e:
LOG.warning(_LW(
"Caught following error trying to kill migration process: %s"),
LOG.warning(
"Caught following error trying to kill migration process: %s",
e)

# Sleep couple of seconds before destroying updated objects

@ -1544,9 +1544,9 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
'sudo', 'zfs', 'destroy', '-r', dst_dataset_name,
)
except exception.ProcessExecutionError as e:
LOG.warning(_LW(
LOG.warning(
"Failed to destroy destination dataset with following error: "
"%s"),
"%s",
e)

LOG.debug(
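Several of the ZFSonLinux hunks above land inside for/else blocks: the else branch runs only when the loop finishes without hitting break, i.e. when no matching dataset was found on the backend. A condensed sketch of the idiom (illustrative; the loop variable names are assumptions, not taken from the driver):

for dataset in datasets:
    if dataset['name'] == dataset_name:
        # Found the dataset; act on it and stop scanning.
        self.zfs('destroy', '-f', dataset_name)
        break
else:
    # No break occurred: nothing matched, so only warn.
    LOG.warning("Share with '%(id)s' ID and '%(name)s' NAME is "
                "absent on backend. Nothing has been deleted.",
                {'id': share['id'], 'name': dataset_name})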
@ -28,7 +28,7 @@ import six

from manila.common import constants
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.ganesha import utils as ganesha_utils
from manila import utils

@ -103,7 +103,7 @@ class ExecuteMixin(driver.ExecuteMixin):
try:
return self.execute(*cmd, **kwargs)
except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to run command, got error: %s"), e)
LOG.warning("Failed to run command, got error: %s", e)
raise

def _get_option(self, resource_name, option_name, pool_level=False,

@ -201,8 +201,8 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
self._is_kernel_version = True
except exception.ProcessExecutionError as e:
LOG.info(
_LI("Looks like ZFS kernel module is absent. "
"Assuming FUSE version is installed. Error: %s"), e)
"Looks like ZFS kernel module is absent. "
"Assuming FUSE version is installed. Error: %s", e)
self._is_kernel_version = False
return self._is_kernel_version

@ -215,7 +215,7 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
try:
self.execute('sudo', 'exportfs')
except exception.ProcessExecutionError:
LOG.exception(_LE("Call of 'exportfs' utility returned error."))
LOG.exception("Call of 'exportfs' utility returned error.")
raise

# Init that class instance attribute on start of manila-share service

@ -300,8 +300,8 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
break
else:
LOG.warning(
_LW("Dataset with '%(name)s' NAME is absent on backend. "
"Access rules were not applied."), {'name': dataset_name})
"Dataset with '%(name)s' NAME is absent on backend. "
"Access rules were not applied.", {'name': dataset_name})

# NOTE(vponomaryov): Setting of ZFS share options does not remove rules
# that were added and then removed. So, remove them explicitly.
@ -18,7 +18,7 @@ from oslo_log import log
from oslo_serialization import jsonutils

from manila import exception
from manila.i18n import _, _LE, _LW
from manila.i18n import _
from manila.share.drivers.zfssa import restclient


@ -233,12 +233,12 @@ class ZFSSAApi(object):
svc = self.share_path % (pool, project, share)
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_LE('Error deleting '
'share: %(share)s to '
'pool: %(pool)s '
'project: %(project)s '
'return code: %(ret.status)d '
'message: %(ret.data)s.'),
exception_msg = (('Error deleting '
'share: %(share)s to '
'pool: %(pool)s '
'project: %(project)s '
'return code: %(ret.status)d '
'message: %(ret.data)s.'),
{'share': share,
'pool': pool,
'project': project,

@ -391,7 +391,7 @@ class ZFSSAApi(object):
svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']}
ret = self.rclient.get(svc)
if ret.status == restclient.Status.OK:
LOG.warning(_LW('Property %s already exists.'), schema['property'])
LOG.warning('Property %s already exists.', schema['property'])
return
ret = self.rclient.post(base, schema)
if ret.status != restclient.Status.CREATED:
@ -24,7 +24,6 @@ import six

from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila.share import driver
from manila.share.drivers.zfssa import zfssarest

@ -278,7 +277,7 @@ class ZFSSAShareDriver(driver.ShareDriver):
snapshot['share_id'],
snapshot['id'])
if has_clones:
LOG.error(_LE("snapshot %s: has clones"), snapshot['id'])
LOG.error("snapshot %s: has clones", snapshot['id'])
raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot['id'])
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,

@ -303,7 +302,7 @@ class ZFSSAShareDriver(driver.ShareDriver):
try:
details = self._get_share_details(name)
except Exception:
LOG.error(_LE('Cannot manage share %s'), name)
LOG.error('Cannot manage share %s', name)
raise

lcfg = self.configuration

@ -438,8 +437,8 @@ class ZFSSAShareDriver(driver.ShareDriver):
new_size_byte = int(new_size) * units.Gi

if used_space > new_size_byte:
LOG.error(_LE('%(used).1fGB of share %(id)s is already used. '
'Cannot shrink to %(newsize)dGB.'),
LOG.error('%(used).1fGB of share %(id)s is already used. '
'Cannot shrink to %(newsize)dGB.',
{'used': float(used_space) / units.Gi,
'id': share['id'],
'newsize': new_size})
@ -29,7 +29,6 @@ from oslo_log import log
import six

from manila import context as ctxt
from manila.i18n import _LW


hook_options = [

@ -110,7 +109,7 @@ class HookBase(object):
*args, **kwargs)
except Exception as e:
if self.suppress_pre_hooks_errors:
LOG.warning(_LW("\nSuppressed exception in pre hook. %s\n"), e)
LOG.warning("\nSuppressed exception in pre hook. %s\n", e)
pre_data = e
else:
raise

@ -135,7 +134,7 @@ class HookBase(object):
except Exception as e:
if self.suppress_post_hooks_errors:
LOG.warning(
_LW("\nSuppressed exception in post hook. %s\n"), e)
"\nSuppressed exception in post hook. %s\n", e)
post_data = e
else:
raise
@ -38,7 +38,7 @@ from manila import context
from manila import coordination
from manila.data import rpcapi as data_rpcapi
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.i18n import _
from manila import manager
from manila import quota
from manila.share import access

@ -204,8 +204,8 @@ class ShareManager(manager.SchedulerDependentManager):
share_driver = self.configuration.share_driver
if share_driver in MAPPING:
msg_args = {'old': share_driver, 'new': MAPPING[share_driver]}
LOG.warning(_LW("Driver path %(old)s is deprecated, update your "
"configuration to the new path %(new)s"),
LOG.warning("Driver path %(old)s is deprecated, update your "
"configuration to the new path %(new)s",
msg_args)
share_driver = MAPPING[share_driver]

@ -250,8 +250,8 @@ class ShareManager(manager.SchedulerDependentManager):
try:
pool = self.driver.get_pool(share_instance)
except Exception:
LOG.exception(_LE("Failed to fetch pool name for share: "
"%(share)s."),
LOG.exception("Failed to fetch pool name for share: "
"%(share)s.",
{'share': share_instance['id']})
return

@ -277,10 +277,10 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.check_for_setup_error()
except Exception:
LOG.exception(
_LE("Error encountered during initialization of driver "
"'%(name)s' on '%(host)s' host."), {
"name": self.driver.__class__.__name__,
"host": self.host,
("Error encountered during initialization of driver "
"'%(name)s' on '%(host)s' host."), {
"name": self.driver.__class__.__name__,
"host": self.host,
}
)
self.driver.initialized = False

@ -298,8 +298,8 @@ class ShareManager(manager.SchedulerDependentManager):

if share_ref.is_busy:
LOG.info(
_LI("Share instance %(id)s: skipping export, "
"because it is busy with an active task: %(task)s."),
"Share instance %(id)s: skipping export, "
"because it is busy with an active task: %(task)s.",
{'id': share_instance['id'],
'task': share_ref['task_state']},
)

@ -307,8 +307,8 @@ class ShareManager(manager.SchedulerDependentManager):

if share_instance['status'] != constants.STATUS_AVAILABLE:
LOG.info(
_LI("Share instance %(id)s: skipping export, "
"because it has '%(status)s' status."),
"Share instance %(id)s: skipping export, "
"because it has '%(status)s' status.",
{'id': share_instance['id'],
'status': share_instance['status']},
)

@ -322,9 +322,9 @@ class ShareManager(manager.SchedulerDependentManager):
export_locations = self.driver.ensure_share(
ctxt, share_instance, share_server=share_server)
except Exception:
LOG.exception(_LE("Caught exception trying ensure "
"share '%(s_id)s'."), {'s_id':
share_instance['id']})
LOG.exception("Caught exception trying ensure "
"share '%(s_id)s'.",
{'s_id': share_instance['id']})
continue

if export_locations:

@ -341,8 +341,8 @@ class ShareManager(manager.SchedulerDependentManager):
ctxt, share_instance['id'], share_server=share_server)
except Exception:
LOG.exception(
_LE("Unexpected error occurred while updating access "
"rules for share instance %(s_id)s."),
("Unexpected error occurred while updating access "
"rules for share instance %(s_id)s."),
{'s_id': share_instance['id']},
)

@ -369,14 +369,14 @@ class ShareManager(manager.SchedulerDependentManager):
self.snapshot_access_helper.update_access_rules(
ctxt, snap_instance['id'], share_server)
except Exception:
LOG.exception(_LE(
LOG.exception(
"Unexpected error occurred while updating "
"access rules for snapshot instance %s."),
"access rules for snapshot instance %s.",
snap_instance['id'])

self.publish_service_capabilities(ctxt)
LOG.info(_LI("Finished initialization of driver: '%(driver)s"
"@%(host)s'"),
LOG.info("Finished initialization of driver: '%(driver)s"
"@%(host)s'",
{"driver": self.driver.__class__.__name__,
"host": self.host})

@ -431,7 +431,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, parent_share_server_id)
except exception.ShareServerNotFound:
with excutils.save_and_reraise_exception():
error(_LE("Parent share server %s does not exist."),
error("Parent share server %s does not exist.",
parent_share_server_id)

if parent_share_server['status'] != constants.STATUS_ACTIVE:

@ -439,8 +439,8 @@ class ShareManager(manager.SchedulerDependentManager):
'id': parent_share_server_id,
'status': parent_share_server['status'],
}
error(_LE("Parent share server %(id)s has invalid status "
"'%(status)s'."), error_params)
error("Parent share server %(id)s has invalid status "
"'%(status)s'.", error_params)
raise exception.InvalidShareServer(
share_server_id=parent_share_server
)

@ -478,7 +478,7 @@ class ShareManager(manager.SchedulerDependentManager):
)
except Exception as e:
with excutils.save_and_reraise_exception():
error(_LE("Cannot choose compatible share server: %s"),
error("Cannot choose compatible share server: %s",
e)

if not compatible_share_server:

@ -526,10 +526,10 @@ class ShareManager(manager.SchedulerDependentManager):
# Create share server on backend with data from db.
share_server = self._setup_server(context, share_server,
metadata=metadata)
LOG.info(_LI("Share server created successfully."))
LOG.info("Share server created successfully.")
else:
LOG.info(_LI("Using preexisting share server: "
"'%(share_server_id)s'"),
LOG.info("Using preexisting share server: "
"'%(share_server_id)s'",
{'share_server_id': share_server['id']})
return share_server

@ -640,7 +640,7 @@ class ShareManager(manager.SchedulerDependentManager):
)
except Exception as e:
with excutils.save_and_reraise_exception():
error(_LE("Cannot choose compatible share-server: %s"),
error("Cannot choose compatible share-server: %s",
e)

if not compatible_share_server:

@ -670,10 +670,10 @@ class ShareManager(manager.SchedulerDependentManager):
# Create share server on backend with data from db.
compatible_share_server = self._setup_server(
context, compatible_share_server)
LOG.info(_LI("Share server created successfully."))
LOG.info("Share server created successfully.")
else:
LOG.info(_LI("Used preexisting share server "
"'%(share_server_id)s'"),
LOG.info("Used preexisting share server "
"'%(share_server_id)s'",
{'share_server_id': compatible_share_server['id']})
return compatible_share_server, updated_share_group

@ -963,8 +963,8 @@ class ShareManager(manager.SchedulerDependentManager):
(constants.
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)})

LOG.info(_LI("Share Migration for share %s completed "
"first phase successfully."),
LOG.info("Share Migration for share %s completed "
"first phase successfully.",
share['id'])
else:
share = self.db.share_get(

@ -972,8 +972,8 @@ class ShareManager(manager.SchedulerDependentManager):

if (share['task_state'] ==
constants.TASK_STATE_MIGRATION_CANCELLED):
LOG.warning(_LW(
"Share Migration for share %s was cancelled."),
LOG.warning(
"Share Migration for share %s was cancelled.",
share['id'])

except Exception:
@ -1074,7 +1074,7 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception as e:
if not isinstance(e, NotImplementedError):
LOG.exception(
_LE("The driver could not migrate the share %(shr)s"),
("The driver could not migrate the share %(shr)s"),
{'shr': share_id})

try:

@ -1265,7 +1265,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_snapshot_instance_delete(context, instance['id'])

self.db.share_instance_delete(context, instance_id)
LOG.info(_LI("Share instance %s: deleted successfully."),
LOG.info("Share instance %s: deleted successfully.",
instance_id)

self._check_delete_share_server(context, share_instance)

@ -1280,8 +1280,8 @@ class ShareManager(manager.SchedulerDependentManager):

share_ref = self.db.share_get(context, src_share_instance['share_id'])

LOG.info(_LI("Received request to finish Share Migration for "
"share %s."), share_ref['id'])
LOG.info("Received request to finish Share Migration for "
"share %s.", share_ref['id'])

if share_ref['task_state'] == (
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):

@ -1341,8 +1341,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_update(
context, dest_share_instance['share_id'], model_update)

LOG.info(_LI("Share Migration for share %s"
" completed successfully."), share_ref['id'])
LOG.info("Share Migration for share %s"
" completed successfully.", share_ref['id'])

def _get_extra_specs_from_share_type(self, context, share_type_id):

@ -1386,8 +1386,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})

LOG.info(_LI("Share Migration for share %s"
" was cancelled."), share_ref['id'])
LOG.info("Share Migration for share %s"
" was cancelled.", share_ref['id'])
return
else:
raise exception.ShareMigrationFailed(reason=msg)

@ -1489,8 +1489,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})

LOG.info(_LI("Share Migration for share %s"
" was cancelled."), share_ref['id'])
LOG.info("Share Migration for share %s"
" was cancelled.", share_ref['id'])

@utils.require_driver_initialized
def migration_get_progress(self, context, src_instance_id,

@ -1586,8 +1586,8 @@ class ShareManager(manager.SchedulerDependentManager):
)
except Exception:
with excutils.save_and_reraise_exception():
error = _LE("Creation of share instance %s failed: "
"failed to get share server.")
error = ("Creation of share instance %s failed: "
"failed to get share server.")
LOG.error(error, share_instance_id)
self.db.share_instance_update(
context, share_instance_id,

@ -1610,7 +1610,7 @@ class ShareManager(manager.SchedulerDependentManager):

except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Share instance %s failed on creation."),
LOG.error("Share instance %s failed on creation.",
share_instance_id)
detail_data = getattr(e, 'detail_data', {})

@ -1626,16 +1626,16 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_export_locations_update(
context, share_instance['id'], export_locations)
else:
LOG.warning(_LW('Share instance information in exception '
'can not be written to db because it '
'contains %s and it is not a dictionary.'),
LOG.warning('Share instance information in exception '
'can not be written to db because it '
'contains %s and it is not a dictionary.',
detail_data)
self.db.share_instance_update(
context, share_instance_id,
{'status': constants.STATUS_ERROR}
)
else:
LOG.info(_LI("Share instance %s created successfully."),
LOG.info("Share instance %s created successfully.",
share_instance_id)
share = self.db.share_get(context, share_instance['share_id'])
updates = {

@ -1742,8 +1742,8 @@ class ShareManager(manager.SchedulerDependentManager):
)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to get share server "
"for share replica creation."))
LOG.error("Failed to get share server "
"for share replica creation.")
self.db.share_replica_update(
context, share_replica['id'],
{'status': constants.STATUS_ERROR,

@ -1785,7 +1785,7 @@ class ShareManager(manager.SchedulerDependentManager):

except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Share replica %s failed on creation."),
LOG.error("Share replica %s failed on creation.",
share_replica['id'])
self.db.share_replica_update(
context, share_replica['id'],

@ -1800,8 +1800,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_replica['id'],
replica_ref.get('export_locations'))
else:
msg = _LW('Invalid export locations passed to the share '
'manager.')
msg = ('Invalid export locations passed to the share '
'manager.')
LOG.warning(msg)

if replica_ref.get('replica_state'):

@ -1819,7 +1819,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_replica['id'],
constants.STATUS_ACTIVE)

LOG.info(_LI("Share replica %s created successfully."),
LOG.info("Share replica %s created successfully.",
share_replica['id'])

@add_hooks

@ -1899,7 +1899,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, replica_snapshot['id'])

self.db.share_replica_delete(context, share_replica['id'])
LOG.info(_LI("Share replica %s deleted successfully."),
LOG.info("Share replica %s deleted successfully.",
share_replica['id'])

@add_hooks

@ -1980,10 +1980,10 @@ class ShareManager(manager.SchedulerDependentManager):
for instance in active_replica_snapshot_instances:
if instance['status'] in (constants.STATUS_CREATING,
constants.STATUS_DELETING):
msg = _LI("The replica snapshot instance %(instance)s was "
"in %(state)s. Since it was not in %(available)s "
"state when the replica was promoted, it will be "
"set to %(error)s.")
msg = ("The replica snapshot instance %(instance)s was "
"in %(state)s. Since it was not in %(available)s "
"state when the replica was promoted, it will be "
"set to %(error)s.")
payload = {
'instance': instance['id'],
'state': instance['status'],

@ -2048,8 +2048,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_replica['id'],
updated_replica.get('access_rules_status'))

LOG.info(_LI("Share replica %s: promoted to active state "
"successfully."), share_replica['id'])
LOG.info("Share replica %s: promoted to active state "
"successfully.", share_replica['id'])

@periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
@utils.require_driver_initialized

@ -2135,8 +2135,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, replica_list, share_replica, access_rules,
available_share_snapshots, share_server=share_server)
except Exception:
msg = _LE("Driver error when updating replica "
"state for replica %s.")
msg = ("Driver error when updating replica "
"state for replica %s.")
LOG.exception(msg, share_replica['id'])
self.db.share_replica_update(
context, share_replica['id'],

@ -2150,8 +2150,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_replica_update(context, share_replica['id'],
{'replica_state': replica_state})
elif replica_state:
msg = (_LW("Replica %(id)s cannot be set to %(state)s "
"through update call.") %
msg = (("Replica %(id)s cannot be set to %(state)s "
"through update call.") %
{'id': share_replica['id'], 'state': replica_state})
LOG.warning(msg)
@ -2272,9 +2272,9 @@ class ShareManager(manager.SchedulerDependentManager):

if not snapshot_update.get('size'):
snapshot_update['size'] = snapshot_ref['share']['size']
LOG.warning(_LW("Cannot get the size of the snapshot "
"%(snapshot_id)s. Using the size of "
"the share instead."),
LOG.warning("Cannot get the size of the snapshot "
"%(snapshot_id)s. Using the size of "
"the share instead.",
{'snapshot_id': snapshot_id})

self._update_quota_usages(context, project_id, {

@ -2356,7 +2356,7 @@ class ShareManager(manager.SchedulerDependentManager):

except exception.InvalidShare as e:
share_manage_set_error_status(
_LE("Share can not be unmanaged: %s."), e)
("Share can not be unmanaged: %s."), e)
return

try:

@ -2370,7 +2370,7 @@ class ShareManager(manager.SchedulerDependentManager):
# Quota reservation errors here are not fatal, because
# unmanage is administrator API and he/she could update user
# quota usages later if it's required.
LOG.warning(_LW("Failed to update quota usages: %s."), e)
LOG.warning("Failed to update quota usages: %s.", e)

if self.configuration.safe_get('unmanage_remove_access_rules'):
try:

@ -2382,11 +2382,11 @@ class ShareManager(manager.SchedulerDependentManager):
)
except Exception as e:
share_manage_set_error_status(
_LE("Can not remove access rules of share: %s."), e)
("Can not remove access rules of share: %s."), e)
return

self.db.share_instance_delete(context, share_instance['id'])
LOG.info(_LI("Share %s: unmanaged successfully."), share_id)
LOG.info("Share %s: unmanaged successfully.", share_id)

@add_hooks
@utils.require_driver_initialized

@ -2396,7 +2396,7 @@ class ShareManager(manager.SchedulerDependentManager):
msg = _("Unmanage snapshot is not supported for "
"driver_handles_share_servers=True mode.")
self.db.share_snapshot_update(context, snapshot_id, status)
LOG.error(_LE("Share snapshot cannot be unmanaged: %s."),
LOG.error("Share snapshot cannot be unmanaged: %s.",
msg)
return

@ -2415,7 +2415,7 @@ class ShareManager(manager.SchedulerDependentManager):
msg = _("Unmanage snapshot is not supported for "
"share snapshots with share servers.")
self.db.share_snapshot_update(context, snapshot_id, status)
LOG.error(_LE("Share snapshot cannot be unmanaged: %s."),
LOG.error("Share snapshot cannot be unmanaged: %s.",
msg)
return

@ -2428,7 +2428,7 @@ class ShareManager(manager.SchedulerDependentManager):
share_server=share_server)
except Exception:
LOG.exception(
_LE("Cannot remove access rules of snapshot %s."),
("Cannot remove access rules of snapshot %s."),
snapshot_id)
self.db.share_snapshot_update(context, snapshot_id, status)
return

@ -2437,7 +2437,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.unmanage_snapshot(snapshot_instance)
except exception.UnmanageInvalidShareSnapshot as e:
self.db.share_snapshot_update(context, snapshot_id, status)
LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), e)
LOG.error("Share snapshot cannot be unmanaged: %s.", e)
return

try:

@ -2452,7 +2452,7 @@ class ShareManager(manager.SchedulerDependentManager):
# Quota reservation errors here are not fatal, because
# unmanage is administrator API and he/she could update user
# quota usages later if it's required.
LOG.warning(_LW("Failed to update quota usages: %s."), e)
LOG.warning("Failed to update quota usages: %s.", e)

self.db.share_snapshot_instance_delete(
context, snapshot_instance['id'])

@ -2502,8 +2502,8 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception:
with excutils.save_and_reraise_exception():

msg = _LE('Share %(share)s could not be reverted '
'to snapshot %(snap)s.')
msg = ('Share %(share)s could not be reverted '
'to snapshot %(snap)s.')
msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.exception(msg, msg_args)

@ -2529,8 +2529,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_snapshot_update(
context, snapshot_id, {'status': constants.STATUS_AVAILABLE})

msg = _LI('Share %(share)s reverted to snapshot %(snap)s '
'successfully.')
msg = ('Share %(share)s reverted to snapshot %(snap)s '
'successfully.')
msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.info(msg, msg_args)

@ -2550,14 +2550,14 @@ class ShareManager(manager.SchedulerDependentManager):
share_server=share_server
)
except exception.ShareResourceNotFound:
LOG.warning(_LW("Share instance %s does not exist in the "
"backend."), share_instance_id)
LOG.warning("Share instance %s does not exist in the "
"backend.", share_instance_id)
except Exception:
with excutils.save_and_reraise_exception() as exc_context:
if force:
msg = _LE("The driver was unable to delete access rules "
"for the instance: %s. Will attempt to delete "
"the instance anyway.")
msg = ("The driver was unable to delete access rules "
"for the instance: %s. Will attempt to delete "
"the instance anyway.")
LOG.error(msg, share_instance_id)
exc_context.reraise = False
else:

@ -2570,16 +2570,16 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.delete_share(context, share_instance,
share_server=share_server)
except exception.ShareResourceNotFound:
LOG.warning(_LW("Share instance %s does not exist in the "
"backend."), share_instance_id)
LOG.warning("Share instance %s does not exist in the "
"backend.", share_instance_id)
except Exception:
with excutils.save_and_reraise_exception() as exc_context:
if force:
msg = _LE("The driver was unable to delete the share "
"instance: %s on the backend. Since this "
"operation is forced, the instance will be "
"deleted from Manila's database. A cleanup on "
"the backend may be necessary.")
msg = ("The driver was unable to delete the share "
"instance: %s on the backend. Since this "
"operation is forced, the instance will be "
"deleted from Manila's database. A cleanup on "
"the backend may be necessary.")
LOG.error(msg, share_instance_id)
exc_context.reraise = False
else:

@ -2589,7 +2589,7 @@ class ShareManager(manager.SchedulerDependentManager):
{'status': constants.STATUS_ERROR_DELETING})

self.db.share_instance_delete(context, share_instance_id)
LOG.info(_LI("Share instance %s: deleted successfully."),
LOG.info("Share instance %s: deleted successfully.",
share_instance_id)

self._check_delete_share_server(context, share_instance)

@ -2610,7 +2610,7 @@ class ShareManager(manager.SchedulerDependentManager):
if not (self.driver.driver_handles_share_servers and
self.configuration.automatic_share_server_cleanup):
return
LOG.info(_LI("Check for unused share servers to delete."))
LOG.info("Check for unused share servers to delete.")
updated_before = timeutils.utcnow() - datetime.timedelta(
minutes=self.configuration.unused_share_server_cleanup_interval)
servers = self.db.share_server_get_all_unused_deletable(ctxt,

@ -2697,10 +2697,10 @@ class ShareManager(manager.SchedulerDependentManager):
share_server=share_server)
except Exception:
LOG.exception(
_LE("Failed to remove access rules for snapshot %s."),
("Failed to remove access rules for snapshot %s."),
snapshot_instance['id'])
LOG.warning(_LW("The driver was unable to remove access rules "
"for snapshot %s. Moving on."),
LOG.warning("The driver was unable to remove access rules "
"for snapshot %s. Moving on.",
snapshot_instance['snapshot_id'])

try:

@ -2731,8 +2731,8 @@ class ShareManager(manager.SchedulerDependentManager):
user_id=snapshot_ref['user_id'])
except Exception:
reservations = None
LOG.exception(_LE("Failed to update quota usages while deleting "
"snapshot %s."), snapshot_id)
LOG.exception("Failed to update quota usages while deleting "
"snapshot %s.", snapshot_id)

if reservations:
QUOTAS.commit(context, reservations, project_id=project_id,

@ -2833,8 +2833,8 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception:
with excutils.save_and_reraise_exception():

msg = _LE('Share %(share)s could not be reverted '
'to snapshot %(snap)s.')
msg = ('Share %(share)s could not be reverted '
'to snapshot %(snap)s.')
msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.exception(msg, msg_args)

@ -2862,8 +2862,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, active_replica_snapshot['id'],
{'status': constants.STATUS_AVAILABLE})

msg = _LI('Share %(share)s reverted to snapshot %(snap)s '
'successfully.')
msg = ('Share %(share)s reverted to snapshot %(snap)s '
'successfully.')
msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.info(msg, msg_args)
@ -3039,19 +3039,19 @@ class ShareManager(manager.SchedulerDependentManager):

except exception.SnapshotResourceNotFound:
if replica_snapshot['status'] == constants.STATUS_DELETING:
LOG.info(_LI('Snapshot %(snapshot_instance)s on replica '
'%(replica)s has been deleted.'), msg_payload)
LOG.info('Snapshot %(snapshot_instance)s on replica '
'%(replica)s has been deleted.', msg_payload)
self.db.share_snapshot_instance_delete(
context, replica_snapshot['id'])
else:
LOG.exception(_LE("Replica snapshot %s was not found on "
"the backend."), replica_snapshot['id'])
LOG.exception("Replica snapshot %s was not found on "
"the backend.", replica_snapshot['id'])
self.db.share_snapshot_instance_update(
context, replica_snapshot['id'],
{'status': constants.STATUS_ERROR})
except Exception:
LOG.exception(_LE("Driver error while updating replica snapshot: "
"%s"), replica_snapshot['id'])
LOG.exception("Driver error while updating replica snapshot: "
"%s", replica_snapshot['id'])
self.db.share_snapshot_instance_update(
context, replica_snapshot['id'],
{'status': constants.STATUS_ERROR})

@ -3081,7 +3081,7 @@ class ShareManager(manager.SchedulerDependentManager):
@periodic_task.periodic_task(spacing=CONF.periodic_interval)
@utils.require_driver_initialized
def _report_driver_status(self, context):
LOG.info(_LI('Updating share status'))
LOG.info('Updating share status')
share_stats = self.driver.get_share_stats(refresh=True)
if not share_stats:
return

@ -3309,7 +3309,7 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_LE("Share server '%s' failed on deletion."),
"Share server '%s' failed on deletion.",
server_id)
self.db.share_server_update(
context, server_id, {'status': constants.STATUS_ERROR})

@ -3318,7 +3318,7 @@ class ShareManager(manager.SchedulerDependentManager):

_wrapped_delete_share_server()
LOG.info(
_LI("Share server '%s' has been deleted successfully."),
"Share server '%s' has been deleted successfully.",
share_server['id'])
self.driver.deallocate_network(context, share_server['id'])

@ -3336,7 +3336,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.extend_share(
share_instance, new_size, share_server=share_server)
except Exception as e:
LOG.exception(_LE("Extend share failed."), resource=share)
LOG.exception("Extend share failed.", resource=share)

try:
self.db.share_update(

@ -3363,7 +3363,7 @@ class ShareManager(manager.SchedulerDependentManager):
}
share = self.db.share_update(context, share['id'], share_update)

LOG.info(_LI("Extend share completed successfully."), resource=share)
LOG.info("Extend share completed successfully.", resource=share)

@add_hooks
@utils.require_driver_initialized

@ -3396,7 +3396,7 @@ class ShareManager(manager.SchedulerDependentManager):
gigabytes=-size_decrease)
except Exception as e:
error_occurred(
e, _LE("Failed to update quota on share shrinking."))
e, ("Failed to update quota on share shrinking."))

try:
self.driver.shrink_share(

@ -3406,11 +3406,11 @@ class ShareManager(manager.SchedulerDependentManager):
# shouldn't shrink share when this validation error occurs.
except Exception as e:
if isinstance(e, exception.ShareShrinkingPossibleDataLoss):
msg = _LE("Shrink share failed due to possible data loss.")
msg = ("Shrink share failed due to possible data loss.")
status = constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR
error_params = {'msg': msg, 'status': status}
else:
error_params = {'msg': _LE("Shrink share failed.")}
error_params = {'msg': ("Shrink share failed.")}

try:
error_occurred(e, **error_params)

@ -3427,7 +3427,7 @@ class ShareManager(manager.SchedulerDependentManager):
}
share = self.db.share_update(context, share['id'], share_update)

LOG.info(_LI("Shrink share completed successfully."), resource=share)
LOG.info("Shrink share completed successfully.", resource=share)

@utils.require_driver_initialized
def create_share_group(self, context, share_group_id):

@ -3479,15 +3479,15 @@ class ShareManager(manager.SchedulerDependentManager):
)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to get share server"
" for share group creation."))
LOG.error("Failed to get share server"
" for share group creation.")
self.db.share_group_update(
context, share_group_id,
{'status': constants.STATUS_ERROR})

try:
# TODO(ameade): Add notification for create.start
LOG.info(_LI("Share group %s: creating"), share_group_id)
LOG.info("Share group %s: creating", share_group_id)

model_update, share_update_list = None, None

@ -3525,7 +3525,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_instance_update(
context, share['id'],
{'status': constants.STATUS_ERROR})
LOG.error(_LE("Share group %s: create failed"), share_group_id)
LOG.error("Share group %s: create failed", share_group_id)

now = timeutils.utcnow()
for share in shares:

@ -3535,7 +3535,7 @@ class ShareManager(manager.SchedulerDependentManager):
share_group_ref['id'],
{'status': status,
'created_at': now})
LOG.info(_LI("Share group %s: created successfully"), share_group_id)
LOG.info("Share group %s: created successfully", share_group_id)

# TODO(ameade): Add notification for create.end

@ -3553,7 +3553,7 @@ class ShareManager(manager.SchedulerDependentManager):
# TODO(ameade): Add notification for delete.start
|
||||
|
||||
try:
|
||||
LOG.info(_LI("Share group %s: deleting"), share_group_id)
|
||||
LOG.info("Share group %s: deleting", share_group_id)
|
||||
share_server = None
|
||||
if share_group_ref.get('share_server_id'):
|
||||
share_server = self.db.share_server_get(
|
||||
@ -3571,11 +3571,11 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context,
|
||||
share_group_ref['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
LOG.error(_LE("Share group %s: delete failed"),
|
||||
LOG.error("Share group %s: delete failed",
|
||||
share_group_ref['id'])
|
||||
|
||||
self.db.share_group_destroy(context, share_group_id)
|
||||
LOG.info(_LI("Share group %s: deleted successfully"), share_group_id)
|
||||
LOG.info("Share group %s: deleted successfully", share_group_id)
|
||||
|
||||
# TODO(ameade): Add notification for delete.end
|
||||
|
||||
@ -3594,7 +3594,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
updated_members_ids = []
|
||||
|
||||
try:
|
||||
LOG.info(_LI("Share group snapshot %s: creating"),
|
||||
LOG.info("Share group snapshot %s: creating",
|
||||
share_group_snapshot_id)
|
||||
share_server = None
|
||||
if snap_ref['share_group'].get('share_server_id'):
|
||||
@ -3611,9 +3611,9 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
# to have here also 'export_locations' when it is supported.
|
||||
member_id = update.pop('id', None)
|
||||
if not member_id:
|
||||
LOG.warning(_LW(
|
||||
LOG.warning(
|
||||
"One of share group snapshot '%s' members does not "
|
||||
"have reference ID. Its update was skipped."),
|
||||
"have reference ID. Its update was skipped.",
|
||||
share_group_snapshot_id)
|
||||
continue
|
||||
# TODO(vponomaryov): remove following condition when
|
||||
@ -3660,7 +3660,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context,
|
||||
snap_ref['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
LOG.error(_LE("Share group snapshot %s: create failed"),
|
||||
LOG.error("Share group snapshot %s: create failed",
|
||||
share_group_snapshot_id)
|
||||
|
||||
for member in (snap_ref.get('share_group_snapshot_members') or []):
|
||||
@ -3673,7 +3673,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_group_snapshot_update(
|
||||
context, snap_ref['id'],
|
||||
{'status': status, 'updated_at': now})
|
||||
LOG.info(_LI("Share group snapshot %s: created successfully"),
|
||||
LOG.info("Share group snapshot %s: created successfully",
|
||||
share_group_snapshot_id)
|
||||
|
||||
return snap_ref['id']
|
||||
@ -3691,7 +3691,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
snapshot_update = False
|
||||
|
||||
try:
|
||||
LOG.info(_LI("Share group snapshot %s: deleting"),
|
||||
LOG.info("Share group snapshot %s: deleting",
|
||||
share_group_snapshot_id)
|
||||
|
||||
share_server = None
|
||||
@ -3719,12 +3719,12 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context,
|
||||
snap_ref['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
LOG.error(_LE("Share group snapshot %s: delete failed"),
|
||||
LOG.error("Share group snapshot %s: delete failed",
|
||||
snap_ref['name'])
|
||||
|
||||
self.db.share_group_snapshot_destroy(context, share_group_snapshot_id)
|
||||
|
||||
LOG.info(_LI("Share group snapshot %s: deleted successfully"),
|
||||
LOG.info("Share group snapshot %s: deleted successfully",
|
||||
share_group_snapshot_id)
|
||||
|
||||
def _get_share_replica_dict(self, context, share_replica):
|
||||
|
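Note on equivalence (not part of the diff above): the _LI/_LE/_LW markers only wrapped the format string for translation lookup; they never affected how oslo_log formats it. Like stdlib logging, oslo_log interpolates %-style arguments lazily, so dropping the marker and passing the plain string leaves runtime behavior unchanged. A minimal sketch of the before/after call shape, with hypothetical values:

    # Illustrative only. Same message, same deferred interpolation.
    from oslo_log import log

    LOG = log.getLogger(__name__)
    share_id, snapshot_id = 'fake-share-id', 'fake-snap-id'  # hypothetical

    # Before: _LI(...) returned the (translatable) string unchanged:
    # LOG.info(_LI('Share %(share)s reverted to snapshot %(snap)s '
    #              'successfully.'), {'share': share_id, 'snap': snapshot_id})

    # After: pass the plain format string; the argument dict is still
    # supplied separately, so %-formatting happens only when the record
    # is actually emitted.
    LOG.info('Share %(share)s reverted to snapshot %(snap)s successfully.',
             {'share': share_id, 'snap': snapshot_id})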
@@ -21,7 +21,7 @@ from oslo_log import log
 
 from manila.common import constants
 from manila import exception
-from manila.i18n import _, _LW
+from manila.i18n import _
 from manila.share import api as share_api
 import manila.utils as utils
 
@@ -130,16 +130,16 @@ class ShareMigrationHelper(object):
         try:
             self.delete_instance_and_wait(new_instance)
         except Exception:
-            LOG.warning(_LW("Failed to cleanup new instance during generic"
-                            " migration for share %s."), self.share['id'])
+            LOG.warning("Failed to cleanup new instance during generic"
+                        " migration for share %s.", self.share['id'])
 
     def cleanup_access_rules(self, share_instance, share_server):
 
         try:
             self.revert_access_rules(share_instance, share_server)
         except Exception:
-            LOG.warning(_LW("Failed to cleanup access rules during generic"
-                            " migration for share %s."), self.share['id'])
+            LOG.warning("Failed to cleanup access rules during generic"
+                        " migration for share %s.", self.share['id'])
 
     def revert_access_rules(self, share_instance, share_server):
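The touched call sites follow two error-handling shapes: the migration cleanup helpers log a warning and swallow the exception so teardown can continue, while the manager paths log and re-raise via oslo_utils. A minimal sketch of both, with hypothetical names:

    # Illustrative only; 'delete_instance', 'do_delete' etc. are hypothetical.
    from oslo_log import log
    from oslo_utils import excutils

    LOG = log.getLogger(__name__)

    def cleanup_instance(share_id, delete_instance):
        # Best-effort cleanup: warn and keep going.
        try:
            delete_instance()
        except Exception:
            LOG.warning("Failed to cleanup new instance during generic"
                        " migration for share %s.", share_id)

    def delete_server(server_id, do_delete):
        # Log, mark the failure, and let the exception propagate.
        try:
            do_delete()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Share server '%s' failed on deletion.", server_id)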
@@ -29,7 +29,6 @@ from manila import context
 from manila import db
 from manila import exception
 from manila.i18n import _
-from manila.i18n import _LE
 
 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
@@ -53,7 +52,7 @@ def create(context, name, extra_specs=None, is_public=True, projects=None):
                                               is_public=is_public),
                                      projects=projects)
     except db_exception.DBError:
-        LOG.exception(_LE('DB error.'))
+        LOG.exception('DB error.')
         raise exception.ShareTypeCreateFailed(name=name,
                                               extra_specs=extra_specs)
     return type_ref
@@ -86,8 +85,8 @@ def get_all_types(context, inactive=0, search_opts=None):
             required_extra_specs = get_valid_required_extra_specs(
                 type_args['extra_specs'])
         except exception.InvalidExtraSpec:
-            LOG.exception(_LE('Share type %(share_type)s has invalid required'
-                              ' extra specs.'), {'share_type': type_name})
+            LOG.exception('Share type %(share_type)s has invalid required'
+                          ' extra specs.', {'share_type': type_name})
 
         type_args['required_extra_specs'] = required_extra_specs
 
@@ -172,8 +171,8 @@ def get_default_share_type(ctxt=None):
         # Couldn't find share type with the name in default_share_type
         # flag, record this issue and move on
         # TODO(zhiteng) consider add notification to warn admin
-        LOG.exception(_LE('Default share type is not found, '
-                          'please check default_share_type config: %s'),
+        LOG.exception('Default share type is not found, '
+                      'please check default_share_type config: %s',
                       e)
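LOG.exception, as used in the hunks above, is meant to be called from inside an except block: it logs at ERROR level and appends the active traceback automatically, and that behavior is the same with or without the translation wrapper. A minimal sketch, with hypothetical names:

    # Illustrative only; 'parse' and the ValueError are hypothetical stand-ins.
    from oslo_log import log

    LOG = log.getLogger(__name__)

    def load_type(type_name, parse):
        try:
            return parse(type_name)
        except ValueError:
            # Traceback is attached automatically; the dict payload is
            # interpolated lazily via %(share_type)s.
            LOG.exception('Share type %(share_type)s has invalid required'
                          ' extra specs.', {'share_type': type_name})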
@@ -16,7 +16,6 @@
 from oslo_log import log
 
 from manila.common import constants
-from manila.i18n import _LI
 from manila import utils
 
 LOG = log.getLogger(__name__)
@@ -147,8 +146,8 @@ class ShareSnapshotInstanceAccess(object):
             self._update_access_rules(context, snapshot_instance,
                                       share_server=share_server)
         else:
-            LOG.info(_LI("Access rules were successfully applied for "
-                         "snapshot instance: %s"), snapshot_instance['id'])
+            LOG.info("Access rules were successfully applied for "
+                     "snapshot instance: %s", snapshot_instance['id'])
 
     def _check_needs_refresh(self, context, snapshot_instance_id):
@@ -20,7 +20,6 @@ from manila import context
 from manila import db
 from manila import exception
 from manila.i18n import _
-from manila.i18n import _LE
 
 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
@@ -38,7 +37,7 @@ def create(context, name, share_types, group_specs=None, is_public=True,
                          "share_types": share_types},
                     projects=projects)
     except db_exception.DBError:
-        LOG.exception(_LE('DB error'))
+        LOG.exception('DB error')
         raise exception.ShareGroupTypeCreateFailed(
             name=name, group_specs=group_specs)
     return type_ref
@@ -142,8 +141,8 @@ def get_default(ctxt=None):
         return get_by_name(ctxt, name)
     except exception.ShareGroupTypeNotFoundByName:
         LOG.exception(
-            _LE("Default share group type '%s' is not found, "
-                "please check 'default_share_group_type' config."),
+            "Default share group type '%s' is not found, "
+            "please check 'default_share_group_type' config.",
            name,
        )
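Since this is commit 4/5 of the series, a quick way to confirm no stale markers remain in the files a commit touches is a simple scan. Illustrative only, not part of the commit; the path below is a hypothetical checkout location:

    # Walk a source tree and report any remaining _LE/_LI/_LW usages.
    import os
    import re

    MARKER = re.compile(r'_L[EIW]\b')

    def find_markers(root):
        for dirpath, _dirs, files in os.walk(root):
            for name in files:
                if not name.endswith('.py'):
                    continue
                path = os.path.join(dirpath, name)
                with open(path) as f:
                    for lineno, line in enumerate(f, 1):
                        if MARKER.search(line):
                            print('%s:%d: %s' % (path, lineno, line.rstrip()))

    find_markers('manila/share')  # hypothetical checkout path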