Implementing the use of _Lx/i18n markers

Placing the _Lx markers back into the code. No cleaner solution has been
implemented. Patches will be submitted as a manageable series, one
subdirectory at a time.

Partial-Bug: #1384312

Change-Id: I62c708036a9cd5da431b7a95e9cd41167fb5273b
Mike Mason 2014-10-23 13:16:42 +00:00
parent 9a13288cd2
commit 7e7ea5a609
11 changed files with 137 additions and 130 deletions
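
For reference, the _Lx markers used throughout this change are the log-level
translator functions exposed by cinder/i18n.py. A minimal sketch of how that
module defines them via oslo.i18n's TranslatorFactory is shown below for
illustration; the module itself is not touched by this diff.

    # cinder/i18n.py (sketch for illustration; not part of this change)
    from oslo import i18n

    _translators = i18n.TranslatorFactory(domain='cinder')

    # The primary translation function keeps the well-known name "_" and
    # stays reserved for user-facing strings (exceptions, API messages).
    _ = _translators.primary

    # Log-level translators; with lazy translation enabled, each log
    # handler can render the message in its own configured locale.
    _LI = _translators.log_info      # for LOG.info()
    _LW = _translators.log_warning   # for LOG.warn() / LOG.warning()
    _LE = _translators.log_error     # for LOG.error() / LOG.exception()
    _LC = _translators.log_critical  # for LOG.critical()

Each hunk below replaces _() with the marker matching the call's log level
and extends the import from cinder.i18n accordingly.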


@@ -49,7 +49,7 @@ i18n.enable_lazy()

 # Need to register global_opts
 from cinder.common import config  # noqa
-from cinder.i18n import _
+from cinder.i18n import _LE
 from cinder.openstack.common import log as logging
 from cinder import service
 from cinder import utils
@@ -72,11 +72,11 @@ if __name__ == '__main__':
         server = service.WSGIService('osapi_volume')
         launcher.launch_service(server, workers=server.workers or 1)
     except (Exception, SystemExit):
-        LOG.exception(_('Failed to load osapi_volume'))
+        LOG.exception(_LE('Failed to load osapi_volume'))

     for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
         try:
             launcher.launch_service(service.Service.create(binary=binary))
         except (Exception, SystemExit):
-            LOG.exception(_('Failed to load %s'), binary)
+            LOG.exception(_LE('Failed to load %s'), binary)
     launcher.wait()


@@ -56,7 +56,7 @@ from cinder import i18n
 i18n.enable_lazy()
 from cinder import context
 from cinder import db
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 from cinder import rpc
 from cinder import utils
@@ -129,7 +129,8 @@ if __name__ == '__main__':
                 volume_ref,
                 'exists', extra_usage_info=extra_info)
         except Exception as e:
-            LOG.error(_("Failed to send exists notification for volume %s.") %
+            LOG.error(_LE("Failed to send exists notification"
+                          " for volume %s.") %
                       volume_ref.id)
             print(traceback.format_exc(e))
@@ -156,8 +157,8 @@ if __name__ == '__main__':
                 volume_ref,
                 'create.end', extra_usage_info=local_extra_info)
         except Exception as e:
-            LOG.error(_("Failed to send create notification for "
-                        "volume %s.") % volume_ref.id)
+            LOG.error(_LE("Failed to send create notification for "
+                          "volume %s.") % volume_ref.id)
             print(traceback.format_exc(e))

         if (CONF.send_actions and volume_ref.deleted_at and
@@ -183,8 +184,8 @@ if __name__ == '__main__':
                 volume_ref,
                 'delete.end', extra_usage_info=local_extra_info)
         except Exception as e:
-            LOG.error(_("Failed to send delete notification for volume "
-                        "%s.") % volume_ref.id)
+            LOG.error(_LE("Failed to send delete notification for volume "
+                          "%s.") % volume_ref.id)
             print(traceback.format_exc(e))

     snapshots = db.snapshot_get_active_by_window(admin_context,
@@ -203,7 +204,8 @@ if __name__ == '__main__':
                 'exists',
                 extra_info)
         except Exception as e:
-            LOG.error(_("Failed to send exists notification for snapshot %s.")
+            LOG.error(_LE("Failed to send exists notification "
+                          "for snapshot %s.")
                       % snapshot_ref.id)
             print(traceback.format_exc(e))
@@ -230,8 +232,8 @@ if __name__ == '__main__':
                 snapshot_ref,
                 'create.end', extra_usage_info=local_extra_info)
         except Exception as e:
-            LOG.error(_("Failed to send create notification for snapshot "
-                        "%s.") % snapshot_ref.id)
+            LOG.error(_LE("Failed to send create notification for snapshot "
+                          "%s.") % snapshot_ref.id)
             print(traceback.format_exc(e))

         if (CONF.send_actions and snapshot_ref.deleted_at and
@@ -257,8 +259,8 @@ if __name__ == '__main__':
                 snapshot_ref,
                 'delete.end', extra_usage_info=local_extra_info)
         except Exception as e:
-            LOG.error(_("Failed to send delete notification for snapshot "
-                        "%s.") % snapshot_ref.id)
+            LOG.error(_LE("Failed to send delete notification for snapshot "
+                          "%s.") % snapshot_ref.id)
             print(traceback.format_exc(e))

     print(_("Volume usage audit completed"))


@@ -25,7 +25,7 @@ from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 import cinder.policy
@@ -139,9 +139,9 @@ class API(base.Base):
         for over in overs:
             if 'gigabytes' in over:
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
-                        "%(s_size)sG backup (%(d_consumed)dG of "
-                        "%(d_quota)dG already consumed)")
+                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+                          "%(s_size)sG backup (%(d_consumed)dG of "
+                          "%(d_quota)dG already consumed)")
                 LOG.warn(msg % {'s_pid': context.project_id,
                                 's_size': volume['size'],
                                 'd_consumed': _consumed(over),
@@ -151,9 +151,9 @@ class API(base.Base):
                     consumed=_consumed('backup_gigabytes'),
                     quota=quotas['backup_gigabytes'])
             elif 'backups' in over:
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
-                        "backups (%(d_consumed)d backups "
-                        "already consumed)")
+                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+                          "backups (%(d_consumed)d backups "
+                          "already consumed)")
                 LOG.warn(msg % {'s_pid': context.project_id,
                                 'd_consumed': _consumed(over)})
@@ -209,8 +209,8 @@ class API(base.Base):
             name = 'restore_backup_%s' % backup_id
             description = 'auto-created_from_restore_from_backup'

-            LOG.info(_("Creating volume of %(size)s GB for restore of "
-                       "backup %(backup_id)s"),
+            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
+                         "backup %(backup_id)s"),
                      {'size': size, 'backup_id': backup_id},
                      context=context)
             volume = self.volume_api.create(context, size, name, description)
@@ -236,8 +236,8 @@ class API(base.Base):
                        {'volume_size': volume['size'], 'size': size})
                 raise exception.InvalidVolume(reason=msg)

-            LOG.info(_("Overwriting volume %(volume_id)s with restore of "
-                       "backup %(backup_id)s"),
+            LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
+                         "backup %(backup_id)s"),
                      {'volume_id': volume_id, 'backup_id': backup_id},
                      context=context)


@@ -22,7 +22,7 @@ import six
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common import log as logging
@@ -55,7 +55,7 @@ class BackupMetadataAPI(base.Base):
         try:
             jsonutils.dumps(value)
         except TypeError:
-            LOG.info(_("Value with type=%s is not serializable") %
+            LOG.info(_LI("Value with type=%s is not serializable") %
                      type(value))
             return False
@@ -75,8 +75,8 @@ class BackupMetadataAPI(base.Base):
             for key, value in meta:
                 # Exclude fields that are "not JSON serializable"
                 if not self._is_serializable(value):
-                    LOG.info(_("Unable to serialize field '%s' - excluding "
-                               "from backup") % (key))
+                    LOG.info(_LI("Unable to serialize field '%s' - excluding "
+                                 "from backup") % (key))
                     continue
                 container[type_tag][key] = value
@@ -98,8 +98,8 @@ class BackupMetadataAPI(base.Base):
             for entry in meta:
                 # Exclude fields that are "not JSON serializable"
                 if not self._is_serializable(meta[entry]):
-                    LOG.info(_("Unable to serialize field '%s' - excluding "
-                               "from backup") % (entry))
+                    LOG.info(_LI("Unable to serialize field '%s' - excluding "
+                                 "from backup") % (entry))
                     continue
                 container[type_tag][entry] = meta[entry]
@@ -122,8 +122,8 @@ class BackupMetadataAPI(base.Base):
             for entry in meta:
                 # Exclude fields that are "not JSON serializable"
                 if not self._is_serializable(entry.value):
-                    LOG.info(_("Unable to serialize field '%s' - "
-                               "excluding from backup") % (entry))
+                    LOG.info(_LI("Unable to serialize field '%s' - "
+                                 "excluding from backup") % (entry))
                     continue
                 container[type_tag][entry.key] = entry.value


@@ -53,7 +53,7 @@ from oslo.config import cfg
 from cinder.backup.driver import BackupDriver
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import strutils
@@ -177,8 +177,8 @@ class CephBackupDriver(BackupDriver):
             self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
             self.rbd_stripe_count = CONF.backup_ceph_stripe_count
         else:
-            LOG.info(_("RBD striping not supported - ignoring configuration "
-                       "settings for rbd striping"))
+            LOG.info(_LI("RBD striping not supported - ignoring configuration "
+                         "settings for rbd striping"))
             self.rbd_stripe_count = 0
             self.rbd_stripe_unit = 0
@@ -432,15 +432,15 @@ class CephBackupDriver(BackupDriver):
         snap, rem = self._delete_backup_snapshot(client, base_name,
                                                  backup_id)
         if rem:
-            msg = (_("Backup base image of volume %(volume)s still "
-                     "has %(snapshots)s snapshots so skipping base "
-                     "image delete.") %
+            msg = (_LI("Backup base image of volume %(volume)s still "
+                       "has %(snapshots)s snapshots so skipping base "
+                       "image delete.") %
                    {'snapshots': rem, 'volume': volume_id})
             LOG.info(msg)
             return

-        LOG.info(_("Deleting backup base image='%(basename)s' of "
-                   "volume %(volume)s.") %
+        LOG.info(_LI("Deleting backup base image='%(basename)s' of "
+                     "volume %(volume)s.") %
                  {'basename': base_name, 'volume': volume_id})
         # Delete base if no more snapshots
         try:
@@ -448,16 +448,17 @@ class CephBackupDriver(BackupDriver):
         except self.rbd.ImageBusy as exc:
             # Allow a retry if the image is busy
             if retries > 0:
-                LOG.info(_("Backup image of volume %(volume)s is "
-                           "busy, retrying %(retries)s more time(s) "
-                           "in %(delay)ss.") %
+                LOG.info(_LI("Backup image of volume %(volume)s is "
+                             "busy, retrying %(retries)s more time(s) "
+                             "in %(delay)ss.") %
                          {'retries': retries,
                           'delay': delay,
                           'volume': volume_id})
                 eventlet.sleep(delay)
             else:
-                LOG.error(_("Max retries reached deleting backup "
-                            "%(basename)s image of volume %(volume)s.")
+                LOG.error(_LE("Max retries reached deleting backup "
+                              "%(basename)s image of volume "
+                              "%(volume)s.")
                           % {'volume': volume_id,
                              'basename': base_name})
                 raise exc
@@ -491,7 +492,7 @@ class CephBackupDriver(BackupDriver):
             p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_("Pipe1 failed - %s ") % unicode(e))
+            LOG.error(_LE("Pipe1 failed - %s ") % unicode(e))
             raise

         # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
@@ -505,7 +506,7 @@ class CephBackupDriver(BackupDriver):
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_("Pipe2 failed - %s ") % unicode(e))
+            LOG.error(_LE("Pipe2 failed - %s ") % unicode(e))
             raise

         p1.stdout.close()
@@ -969,8 +970,8 @@ class CephBackupDriver(BackupDriver):
                                      dest_user=rbd_user, dest_conf=rbd_conf,
                                      src_snap=restore_point)
             except exception.BackupRBDOperationFailed:
-                LOG.exception(_("Differential restore failed, trying full "
-                                "restore"))
+                LOG.exception(_LE("Differential restore failed, trying full "
+                                  "restore"))
                 raise

         # If the volume we are restoring to is larger than the backup volume,
@@ -1082,8 +1083,9 @@ class CephBackupDriver(BackupDriver):
                 return True, restore_point
         else:
-            LOG.info(_("No restore point found for backup='%(backup)s' of "
-                       "volume %(volume)s - forcing full copy.") %
+            LOG.info(_LI("No restore point found for "
+                         "backup='%(backup)s' of "
+                         "volume %(volume)s - forcing full copy.") %
                      {'backup': backup['id'],
                       'volume': backup['volume_id']})
@@ -1170,8 +1172,8 @@ class CephBackupDriver(BackupDriver):
             LOG.debug('Restore to volume %s finished successfully.' %
                       volume_id)
         except exception.BackupOperationError as e:
-            LOG.error(_('Restore to volume %(volume)s finished with error - '
-                        '%(error)s.') % {'error': e, 'volume': volume_id})
+            LOG.error(_LE('Restore to volume %(volume)s finished with error - '
+                          '%(error)s.') % {'error': e, 'volume': volume_id})
             raise

     def delete(self, backup):
@@ -1182,8 +1184,8 @@ class CephBackupDriver(BackupDriver):
         try:
             self._try_delete_base_image(backup['id'], backup['volume_id'])
         except self.rbd.ImageNotFound:
-            msg = (_("RBD image for backup %(backup)s of volume %(volume)s "
-                     "not found. Deleting backup metadata.")
+            msg = (_LW("RBD image for backup %(backup)s of volume %(volume)s "
+                       "not found. Deleting backup metadata.")
                    % {'backup': backup['id'], 'volume': backup['volume_id']})
             LOG.warning(msg)
             delete_failed = True
@@ -1192,8 +1194,9 @@ class CephBackupDriver(BackupDriver):
             VolumeMetadataBackup(client, backup['id']).remove_if_exists()

         if delete_failed:
-            LOG.info(_("Delete of backup '%(backup)s' for volume '%(volume)s' "
-                       "finished with warning.") %
+            LOG.info(_LI("Delete of backup '%(backup)s' "
+                         "for volume '%(volume)s' "
+                         "finished with warning.") %
                      {'backup': backup['id'], 'volume': backup['volume_id']})
         else:
             LOG.debug("Delete of backup '%(backup)s' for volume "


@@ -43,13 +43,12 @@ from swiftclient import client as swift
 from cinder.backup.driver import BackupDriver
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder.openstack.common import units

 LOG = logging.getLogger(__name__)

 swiftbackup_service_opts = [
@@ -157,8 +156,8 @@ class SwiftBackupDriver(BackupDriver):
                   CONF.backup_swift_auth))
         if CONF.backup_swift_auth == 'single_user':
             if CONF.backup_swift_user is None:
-                LOG.error(_("single_user auth mode enabled, "
-                            "but %(param)s not set")
+                LOG.error(_LE("single_user auth mode enabled, "
+                              "but %(param)s not set")
                           % {'param': 'backup_swift_user'})
                 raise exception.ParameterNotFound(param='backup_swift_user')
             self.conn = swift.Connection(
@@ -392,7 +391,7 @@ class SwiftBackupDriver(BackupDriver):
         except Exception as err:
             with excutils.save_and_reraise_exception():
                 LOG.exception(
-                    _("Backup volume metadata to swift failed: %s") %
+                    _LE("Backup volume metadata to swift failed: %s") %
                     six.text_type(err))
                 self.delete(backup)
@@ -448,8 +447,8 @@ class SwiftBackupDriver(BackupDriver):
         try:
             fileno = volume_file.fileno()
         except IOError:
-            LOG.info("volume_file does not support fileno() so skipping "
-                     "fsync()")
+            LOG.info(_LI("volume_file does not support "
+                         "fileno() so skipping "
+                         "fsync()"))
         else:
             os.fsync(fileno)
@@ -514,8 +514,8 @@ class SwiftBackupDriver(BackupDriver):
         try:
             swift_object_names = self._generate_object_names(backup)
         except Exception:
-            LOG.warn(_('swift error while listing objects, continuing'
-                       ' with delete'))
+            LOG.warn(_LW('swift error while listing objects, continuing'
+                         ' with delete'))

         for swift_object_name in swift_object_names:
             try:
@@ -523,8 +523,9 @@ class SwiftBackupDriver(BackupDriver):
             except socket.error as err:
                 raise exception.SwiftConnectionFailed(reason=err)
             except Exception:
-                LOG.warn(_('swift error while deleting object %s, '
-                           'continuing with delete') % swift_object_name)
+                LOG.warn(_LW('swift error while deleting object %s, '
+                             'continuing with delete')
+                         % swift_object_name)
             else:
                 LOG.debug('deleted swift object: %(swift_object_name)s'
                           ' in container: %(container)s' %


@@ -33,7 +33,7 @@ from oslo.config import cfg
 from cinder.backup.driver import BackupDriver
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LE, _
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils
 from cinder import utils
@@ -249,9 +249,9 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
                       hardlink_path,
                       run_as_root=True)
     except processutils.ProcessExecutionError as exc:
-        err = (_('backup: %(vol_id)s failed to remove backup hardlink'
-                 ' from %(vpath)s to %(bpath)s.\n'
-                 'stdout: %(out)s\n stderr: %(err)s.')
+        err = (_LE('backup: %(vol_id)s failed to remove backup hardlink'
+                   ' from %(vpath)s to %(bpath)s.\n'
+                   'stdout: %(out)s\n stderr: %(err)s.')
               % {'vol_id': volume_id,
                  'vpath': volume_path,
                  'bpath': hardlink_path,
@@ -528,8 +528,8 @@ class TSMBackupDriver(BackupDriver):
             # log error if tsm cannot delete the backup object
             # but do not raise exception so that cinder backup
             # object can be removed.
-            err = (_('delete: %(vol_id)s failed with '
-                     'stdout: %(out)s\n stderr: %(err)s')
+            err = (_LE('delete: %(vol_id)s failed with '
+                       'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': volume_id,
                      'out': out,
                      'err': err})


@@ -40,7 +40,7 @@ from cinder.backup import driver
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder import manager
 from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
@@ -115,7 +115,7 @@ class BackupManager(manager.SchedulerDependentManager):
                 LOG.debug("Got backend '%s'." % (backend))
                 return backend

-        LOG.info(_("Backend not found in hostname (%s) so using default.") %
+        LOG.info(_LI("Backend not found in hostname (%s) so using default.") %
                  (host))

         if 'default' not in self.volume_managers:
@@ -166,7 +166,7 @@ class BackupManager(manager.SchedulerDependentManager):
         self.volume_managers['default'] = default

     def _init_volume_driver(self, ctxt, driver):
-        LOG.info(_("Starting volume driver %(driver_name)s (%(version)s).") %
+        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s).") %
                  {'driver_name': driver.__class__.__name__,
                   'version': driver.get_version()})
         try:
@@ -192,19 +192,19 @@ class BackupManager(manager.SchedulerDependentManager):
         for mgr in self.volume_managers.itervalues():
             self._init_volume_driver(ctxt, mgr.driver)

-        LOG.info(_("Cleaning up incomplete backup operations."))
+        LOG.info(_LI("Cleaning up incomplete backup operations."))
         volumes = self.db.volume_get_all_by_host(ctxt, self.host)
         for volume in volumes:
             volume_host = volume_utils.extract_host(volume['host'], 'backend')
             backend = self._get_volume_backend(host=volume_host)
             if volume['status'] == 'backing-up':
-                LOG.info(_('Resetting volume %s to available '
-                           '(was backing-up).') % volume['id'])
+                LOG.info(_LI('Resetting volume %s to available '
+                             '(was backing-up).') % volume['id'])
                 mgr = self._get_manager(backend)
                 mgr.detach_volume(ctxt, volume['id'])
             if volume['status'] == 'restoring-backup':
-                LOG.info(_('Resetting volume %s to error_restoring '
-                           '(was restoring-backup).') % volume['id'])
+                LOG.info(_LI('Resetting volume %s to error_restoring '
+                             '(was restoring-backup).') % volume['id'])
                 mgr = self._get_manager(backend)
                 mgr.detach_volume(ctxt, volume['id'])
                 self.db.volume_update(ctxt, volume['id'],
@@ -215,18 +215,19 @@ class BackupManager(manager.SchedulerDependentManager):
         backups = self.db.backup_get_all_by_host(ctxt, self.host)
         for backup in backups:
             if backup['status'] == 'creating':
-                LOG.info(_('Resetting backup %s to error (was creating).')
+                LOG.info(_LI('Resetting backup %s to error (was creating).')
                          % backup['id'])
                 err = 'incomplete backup reset on manager restart'
                 self.db.backup_update(ctxt, backup['id'], {'status': 'error',
                                                            'fail_reason': err})
             if backup['status'] == 'restoring':
-                LOG.info(_('Resetting backup %s to available (was restoring).')
+                LOG.info(_LI('Resetting backup %s to '
+                             'available (was restoring).')
                          % backup['id'])
                 self.db.backup_update(ctxt, backup['id'],
                                       {'status': 'available'})
             if backup['status'] == 'deleting':
-                LOG.info(_('Resuming delete on backup: %s.') % backup['id'])
+                LOG.info(_LI('Resuming delete on backup: %s.') % backup['id'])
                 self.delete_backup(ctxt, backup['id'])

     def create_backup(self, context, backup_id):
@@ -234,8 +235,8 @@ class BackupManager(manager.SchedulerDependentManager):
         backup = self.db.backup_get(context, backup_id)
         volume_id = backup['volume_id']
         volume = self.db.volume_get(context, volume_id)
-        LOG.info(_('Create backup started, backup: %(backup_id)s '
-                   'volume: %(volume_id)s.') %
+        LOG.info(_LI('Create backup started, backup: %(backup_id)s '
+                     'volume: %(volume_id)s.') %
                  {'backup_id': backup_id, 'volume_id': volume_id})
         volume_host = volume_utils.extract_host(volume['host'], 'backend')
         backend = self._get_volume_backend(host=volume_host)
@@ -292,12 +293,12 @@ class BackupManager(manager.SchedulerDependentManager):
                                 'size': volume['size'],
                                 'availability_zone':
                                 self.az})
-        LOG.info(_('Create backup finished. backup: %s.'), backup_id)
+        LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)

     def restore_backup(self, context, backup_id, volume_id):
         """Restore volume backups from configured backup service."""
-        LOG.info(_('Restore backup started, backup: %(backup_id)s '
-                   'volume: %(volume_id)s.') %
+        LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
+                     'volume: %(volume_id)s.') %
                  {'backup_id': backup_id, 'volume_id': volume_id})
         backup = self.db.backup_get(context, backup_id)
@@ -330,9 +331,9 @@ class BackupManager(manager.SchedulerDependentManager):
             raise exception.InvalidBackup(reason=err)

         if volume['size'] > backup['size']:
-            LOG.info(_('Volume: %(vol_id)s, size: %(vol_size)d is '
-                       'larger than backup: %(backup_id)s, '
-                       'size: %(backup_size)d, continuing with restore.'),
+            LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
+                         'larger than backup: %(backup_id)s, '
+                         'size: %(backup_size)d, continuing with restore.'),
                      {'vol_id': volume['id'],
                       'vol_size': volume['size'],
                       'backup_id': backup['id'],
@@ -372,8 +373,8 @@ class BackupManager(manager.SchedulerDependentManager):
         self.db.volume_update(context, volume_id, {'status': 'available'})
         self.db.backup_update(context, backup_id, {'status': 'available'})

-        LOG.info(_('Restore backup finished, backup %(backup_id)s restored'
-                   ' to volume %(volume_id)s.') %
+        LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
+                     ' to volume %(volume_id)s.') %
                  {'backup_id': backup_id, 'volume_id': volume_id})

     def delete_backup(self, context, backup_id):
@@ -391,7 +392,7 @@ class BackupManager(manager.SchedulerDependentManager):
                                        'fail_reason':
                                        unicode(err)})

-        LOG.info(_('Delete backup started, backup: %s.'), backup_id)
+        LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
         backup = self.db.backup_get(context, backup_id)
         self.db.backup_update(context, backup_id, {'host': self.host})
@@ -441,7 +442,7 @@ class BackupManager(manager.SchedulerDependentManager):
                                                 **reserve_opts)
             except Exception:
                 reservations = None
-                LOG.exception(_("Failed to update usages deleting backup"))
+                LOG.exception(_LE("Failed to update usages deleting backup"))

         context = context.elevated()
         self.db.backup_destroy(context, backup_id)
@@ -451,7 +452,7 @@ class BackupManager(manager.SchedulerDependentManager):
             QUOTAS.commit(context, reservations,
                           project_id=backup['project_id'])

-        LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
+        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)

     def export_record(self, context, backup_id):
         """Export all volume backup metadata details to allow clean import.
@@ -466,7 +467,7 @@ class BackupManager(manager.SchedulerDependentManager):
         :returns: 'backup_service' describing the needed driver.
         :raises: InvalidBackup
         """
-        LOG.info(_('Export record started, backup: %s.'), backup_id)
+        LOG.info(_LI('Export record started, backup: %s.'), backup_id)

         backup = self.db.backup_get(context, backup_id)
@@ -502,7 +503,7 @@ class BackupManager(manager.SchedulerDependentManager):
             msg = unicode(err)
             raise exception.InvalidBackup(reason=msg)

-        LOG.info(_('Export record finished, backup %s exported.'), backup_id)
+        LOG.info(_LI('Export record finished, backup %s exported.'), backup_id)
         return backup_record

     def import_record(self,
@@ -521,7 +522,7 @@ class BackupManager(manager.SchedulerDependentManager):
         :raises: InvalidBackup
         :raises: ServiceNotFound
         """
-        LOG.info(_('Import record started, backup_url: %s.'), backup_url)
+        LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)

         # Can we import this backup?
         if (backup_service != self.driver_name):
@@ -588,11 +589,11 @@ class BackupManager(manager.SchedulerDependentManager):
                 if isinstance(backup_service, driver.BackupDriverWithVerify):
                     backup_service.verify(backup_id)
                 else:
-                    LOG.warn(_('Backup service %(service)s does not support '
-                               'verify. Backup id %(id)s is not verified. '
-                               'Skipping verify.') % {'service':
-                                                      self.driver_name,
-                                                      'id': backup_id})
+                    LOG.warn(_LW('Backup service %(service)s does not support '
+                                 'verify. Backup id %(id)s is not verified. '
+                                 'Skipping verify.') % {'service':
+                                                        self.driver_name,
+                                                        'id': backup_id})
             except exception.InvalidBackup as err:
                 with excutils.save_and_reraise_exception():
                     self.db.backup_update(context, backup_id,
@@ -600,8 +601,8 @@ class BackupManager(manager.SchedulerDependentManager):
                                            'fail_reason':
                                            unicode(err)})

-        LOG.info(_('Import record id %s metadata from driver '
-                   'finished.') % backup_id)
+        LOG.info(_LI('Import record id %s metadata from driver '
+                     'finished.') % backup_id)

     def reset_status(self, context, backup_id, status):
         """Reset volume backup status.
@@ -613,8 +614,8 @@ class BackupManager(manager.SchedulerDependentManager):
         :raises: BackupVerifyUnsupportedDriver
         :raises: AttributeError
         """
-        LOG.info(_('Reset backup status started, backup_id: '
-                   '%(backup_id)s, status: %(status)s.'),
+        LOG.info(_LI('Reset backup status started, backup_id: '
+                     '%(backup_id)s, status: %(status)s.'),
                  {'backup_id': backup_id,
                   'status': status})
         try:
@@ -625,11 +626,11 @@ class BackupManager(manager.SchedulerDependentManager):
             utils.require_driver_initialized(self.driver)
         except exception.DriverNotInitialized:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Backup driver has not been initialized"))
+                LOG.exception(_LE("Backup driver has not been initialized"))

         backup = self.db.backup_get(context, backup_id)
         backup_service = self._map_service_to_driver(backup['service'])
-        LOG.info(_('Backup service: %s.'), backup_service)
+        LOG.info(_LI('Backup service: %s.'), backup_service)
         if backup_service is not None:
             configured_service = self.driver_name
             if backup_service != configured_service:
@@ -695,4 +696,4 @@ class BackupManager(manager.SchedulerDependentManager):
         notifier_info = {'id': backup_id, 'update': {'status': status}}
         notifier = rpc.get_notifier('backupStatusUpdate')
         notifier.info(context, "backups" + '.reset_status.end',
-                      notifier_info)
+                      notifier_info)


@@ -16,7 +16,7 @@ from migrate import ForeignKeyConstraint
 from sqlalchemy import Boolean, Column, DateTime
 from sqlalchemy import MetaData, Integer, String, Table, ForeignKey

-from cinder.i18n import _
+from cinder.i18n import _LE
 from cinder.openstack.common import log as logging

 LOG = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ def upgrade(migrate_engine):
     try:
         quota_classes.create()
     except Exception:
-        LOG.error(_("Table |%s| not created!"), repr(quota_classes))
+        LOG.error(_LE("Table |%s| not created!"), repr(quota_classes))
         raise

     quota_usages = Table('quota_usages', meta,
@@ -72,7 +72,7 @@ def upgrade(migrate_engine):
     try:
         quota_usages.create()
     except Exception:
-        LOG.error(_("Table |%s| not created!"), repr(quota_usages))
+        LOG.error(_LE("Table |%s| not created!"), repr(quota_usages))
         raise

     reservations = Table('reservations', meta,
@@ -103,7 +103,7 @@ def upgrade(migrate_engine):
     try:
         reservations.create()
     except Exception:
-        LOG.error(_("Table |%s| not created!"), repr(reservations))
+        LOG.error(_LE("Table |%s| not created!"), repr(reservations))
         raise
@@ -125,25 +125,25 @@ def downgrade(migrate_engine):
         fkey = ForeignKeyConstraint(**params)
         fkey.drop()
     except Exception:
-        LOG.error(_("Dropping foreign key reservations_ibfk_1 failed."))
+        LOG.error(_LE("Dropping foreign key reservations_ibfk_1 failed."))

     quota_classes = Table('quota_classes', meta, autoload=True)
     try:
         quota_classes.drop()
     except Exception:
-        LOG.error(_("quota_classes table not dropped"))
+        LOG.error(_LE("quota_classes table not dropped"))
         raise

     quota_usages = Table('quota_usages', meta, autoload=True)
     try:
         quota_usages.drop()
     except Exception:
-        LOG.error(_("quota_usages table not dropped"))
+        LOG.error(_LE("quota_usages table not dropped"))
         raise

     reservations = Table('reservations', meta, autoload=True)
     try:
         reservations.drop()
     except Exception:
-        LOG.error(_("reservations table not dropped"))
+        LOG.error(_LE("reservations table not dropped"))
         raise


@@ -13,7 +13,7 @@
 from sqlalchemy import Boolean, Column, DateTime, Integer
 from sqlalchemy import MetaData, String, Table

-from cinder.i18n import _
+from cinder.i18n import _LE
 from cinder.openstack.common import log as logging

 LOG = logging.getLogger(__name__)
@@ -29,7 +29,7 @@ def upgrade(migrate_engine):
     try:
         table.drop()
     except Exception:
-        LOG.error(_("migrations table not dropped"))
+        LOG.error(_LE("migrations table not dropped"))
         raise
@@ -59,5 +59,5 @@ def downgrade(migrate_engine):
     try:
         table.create()
     except Exception:
-        LOG.error(_("Table |%s| not created"), repr(table))
+        LOG.error(_LE("Table |%s| not created"), repr(table))
         raise


@@ -17,7 +17,7 @@ import datetime
 from oslo.config import cfg
 from sqlalchemy import MetaData, Table

-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
 from cinder.openstack.common import log as logging

 # Get default values via config. The defaults will either
@@ -47,8 +47,8 @@ def upgrade(migrate_engine):
     # Do not add entries if there are already 'default' entries. We don't
     # want to write over something the user added.
     if rows:
-        LOG.info(_("Found existing 'default' entries in the quota_classes "
-                   "table. Skipping insertion of default values."))
+        LOG.info(_LI("Found existing 'default' entries in the quota_classes "
+                     "table. Skipping insertion of default values."))
         return

     try:
@@ -71,9 +71,9 @@ def upgrade(migrate_engine):
                            'resource': 'gigabytes',
                            'hard_limit': CONF.quota_gigabytes,
                            'deleted': False, })
-        LOG.info(_("Added default quota class data into the DB."))
+        LOG.info(_LI("Added default quota class data into the DB."))
     except Exception:
-        LOG.error(_("Default quota class data not inserted into the DB."))
+        LOG.error(_LE("Default quota class data not inserted into the DB."))
         raise