Enable H904 check

H904: String interpolation should be delayed so that it is handled by the
logging code, rather than being done at the point of the logging call.
Use ',' instead of '%'.

See: https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages
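
To illustrate the pattern this check enforces (a minimal, hypothetical sketch; the
logger name and message below are not taken from the diff in this commit):

    import logging

    LOG = logging.getLogger(__name__)
    backend = 'pool-a'

    # Flagged by H904: '%' builds the full string even when DEBUG is disabled.
    LOG.debug("Backend %(name)s selected" % {'name': backend})

    # Preferred: pass the mapping as an argument, so the logging code
    # interpolates only if the record is actually emitted.
    LOG.debug("Backend %(name)s selected", {'name': backend})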

Change-Id: Ib5781b837ba60028dce6dddd5b198647e002c8f1
Author: Ngo Quoc Cuong
Date:   2017-07-05 04:36:25 -04:00
Parent: 7b71585aa1
Commit: 8ca93ac3f7

18 changed files with 47 additions and 48 deletions

View File

@@ -55,7 +55,7 @@ class CapabilitiesFilter(filters.BaseBackendFilter):
             try:
                 cap = cap[scope[index]]
             except (TypeError, KeyError):
-                LOG.debug("Backend doesn't provide capability '%(cap)s' " %
+                LOG.debug("Backend doesn't provide capability '%(cap)s' ",
                           {'cap': scope[index]})
                 return False

View File

@@ -52,7 +52,7 @@ class IgnoreAttemptedHostsFilter(filters.BaseBackendFilter):
         pass_msg = "passes" if passes else "fails"
         LOG.debug("Backend %(backend)s %(pass_msg)s. Previously tried "
-                  "backends: %(backends)s" % {'backend': backend,
+                  "backends: %(backends)s", {'backend': backend,
                                               'pass_msg': pass_msg,
                                               'backends': backends})
         return passes

View File

@@ -916,7 +916,7 @@ class VMAXMasking(object):
             serial_number, storagegroup_name)
         if not masking_list:
             LOG.debug("No masking views associated with storage group "
-                      "%(sg_name)s" % {'sg_name': storagegroup_name})
+                      "%(sg_name)s", {'sg_name': storagegroup_name})
 
         @coordination.synchronized("emc-sg-{sg_name}")
         def do_remove_volume_from_sg(sg_name):
@@ -1141,7 +1141,7 @@ class VMAXMasking(object):
         num_vol_in_sg = self.rest.get_num_vols_in_sg(
             serial_number, storagegroup_name)
         LOG.debug("There are %(num_vol)d volumes remaining in the storage "
-                  "group %(sg_name)s." %
+                  "group %(sg_name)s.",
                   {'num_vol': num_vol_in_sg,
                    'sg_name': storagegroup_name})

View File

@@ -1438,8 +1438,8 @@ class HPE3PARCommon(object):
         for license in valid_licenses:
             if license_to_check in license.get('name'):
                 return True
-        LOG.debug(("'%(capability)s' requires a '%(license)s' "
-                   "license which is not installed.") %
+        LOG.debug("'%(capability)s' requires a '%(license)s' "
+                  "license which is not installed.",
                   {'capability': capability,
                    'license': license_to_check})
         return False

View File

@@ -2225,7 +2225,7 @@ class XIVProxy(proxy.IBMStorageProxy):
     def _call_host_define(self, host,
                           chap_name=None, chap_secret=None, domain_name=None):
         """Call host_define using XCLI."""
-        LOG.debug("host_define with domain: %s)" % domain_name)
+        LOG.debug("host_define with domain: %s)", domain_name)
         if domain_name:
             if chap_name:
                 return self._call_xiv_xcli(
@@ -2291,7 +2291,7 @@ class XIVProxy(proxy.IBMStorageProxy):
     def _get_pool_domain(self, connector):
         pool_name = self.storage_info[storage.FLAG_KEYS['storage_pool']]
-        LOG.debug("pool name from configuration: %s" % pool_name)
+        LOG.debug("pool name from configuration: %s", pool_name)
         domain = None
         try:
             domain = self._call_xiv_xcli(
@@ -2519,7 +2519,7 @@ class XIVProxy(proxy.IBMStorageProxy):
         fc_targets = list(set(target_wwpns))
         fc_targets.sort(key=self._sort_last_digit)
-        LOG.debug("fc_targets : %s" % fc_targets)
+        LOG.debug("fc_targets : %s", fc_targets)
         return fc_targets
 
     def _sort_last_digit(self, a):
@@ -2637,7 +2637,7 @@ class XIVProxy(proxy.IBMStorageProxy):
         certs = certificate.CertificateCollector()
         path = certs.collect_certificate()
         try:
-            LOG.debug('connect_multiendpoint_ssl with: %s' % address)
+            LOG.debug('connect_multiendpoint_ssl with: %s', address)
             xcli = client.XCLIClient.connect_multiendpoint_ssl(
                 user,
                 clear_pass,

View File

@@ -884,7 +884,7 @@ class StorwizeHelpers(object):
         try:
             resp = self.ssh.lshost(host=name)
         except exception.VolumeBackendAPIException as ex:
-            LOG.debug("Exception message: %s" % ex.msg)
+            LOG.debug("Exception message: %s", ex.msg)
             if 'CMMVC5754E' in ex.msg:
                 LOG.debug("CMMVC5754E found in CLI exception.")
                 # CMMVC5754E: The specified object does not exist

View File

@@ -906,7 +906,7 @@ class MStorageVolumeCommon(object):
                 if specs['upperreport'] not in ['on', 'off']:
                     LOG.debug('Illegal arguments. '
                               'upperreport is not on or off.'
-                              'upperreport=%s' % specs['upperreport'])
+                              'upperreport=%s', specs['upperreport'])
                     specs['upperreport'] = None
             else:
                 specs['upperreport'] = None

View File

@@ -812,8 +812,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
                 LOG.debug(
                     'delete remaining the LD from '
                     'ldset_controller_node. '
-                    'Ldset Name=%s.'
-                    % ldset_controller_node_name)
+                    'Ldset Name=%s.', ldset_controller_node_name)
                 self._cli.delldsetld(ldset_controller_node_name,
                                      ldname)
         # assign the LD to LD Set.
@@ -885,7 +884,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
                 self._properties['ld_name_format']))
         if ldname not in lds:
-            LOG.debug('LD `%s` already unbound?' % ldname)
+            LOG.debug('LD `%s` already unbound?', ldname)
             return
         ld = lds[ldname]

View File

@@ -644,7 +644,7 @@ class Client(client_base.Client):
         except netapp_api.NaApiError as ex:
             msg = 'Could not delete QOS policy groups. Details: %(ex)s'
             msg_args = {'ex': ex}
-            LOG.debug(msg % msg_args)
+            LOG.debug(msg, msg_args)
 
     def set_lun_qos_policy_group(self, path, qos_policy_group):
         """Sets qos_policy_group on a LUN."""

View File

@@ -604,7 +604,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
                 wwpn = str(initiator['wwpn']).replace(":", "")
                 wwpns_list.append(wwpn)
             LOG.debug("initiator_wwpns=%(initiator)s "
-                      "wwpns_list_from_array=%(wwpns)s" %
+                      "wwpns_list_from_array=%(wwpns)s",
                       {'initiator': initiator_wwpns,
                        'wwpns': wwpns_list})
             if set(initiator_wwpns) == set(wwpns_list):
@@ -736,7 +736,7 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
     def _get_data_ip(self, netconfig):
         """Get data ip."""
         subnet_label = self.configuration.nimble_subnet_label
-        LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s' %
+        LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
                   {'netlabel': subnet_label, 'netconf': netconfig})
         ret_data_ip = ''
         for subnet in netconfig['array_list'][0]['nic_list']:
@@ -820,7 +820,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
     def _build_initiator_target_map(self, target_wwns, connector):
         """Build the target_wwns and the initiator target map."""
-        LOG.debug("_build_initiator_target_map for %(wwns)s" %
+        LOG.debug("_build_initiator_target_map for %(wwns)s",
                   {'wwns': target_wwns})
         init_targ_map = {}
@@ -911,7 +911,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
             raise NimbleDriverException(
                 _('No initiator group found for initiator %s') %
                 initiator_name)
-        LOG.debug("initiator_target_map %s" % init_targ_map)
+        LOG.debug("initiator_target_map %s", init_targ_map)
         self.APIExecutor.remove_acl(volume, initiator_group_name)
         eventlet.sleep(DEFAULT_SLEEP)
         # FIXME to check for other volumes attached to the host and then
@@ -924,7 +924,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
     def get_wwpns_from_array(self, array_name):
         """Retrieve the wwpns from the array"""
-        LOG.debug("get_wwpns_from_array %s" % array_name)
+        LOG.debug("get_wwpns_from_array %s", array_name)
         target_wwpns = []
         interface_info = self.APIExecutor.get_fc_interface_list(array_name)
         LOG.info("interface_info %(interface_info)s",
@@ -1028,12 +1028,12 @@ class NimbleRestAPIExecutor(object):
     def get_performance_policy_id(self, perf_policy_name):
         api = 'performance_policies/'
         filter = {'name': perf_policy_name}
-        LOG.debug("Perfomance policy Name %s" % perf_policy_name)
+        LOG.debug("Performance policy Name %s", perf_policy_name)
         r = self.get_query(api, filter)
         if not r.json()['data']:
             raise NimbleAPIException(_("No performance policy found for:"
                                        "%(perf)s") % {'perf': perf_policy_name})
-        LOG.debug("Performance policy ID :%(perf)s" %
+        LOG.debug("Performance policy ID :%(perf)s",
                   {'perf': r.json()['data'][0]['id']})
         return r.json()['data'][0]['id']
@@ -1380,7 +1380,7 @@ class NimbleRestAPIExecutor(object):
                                        "snap_id: %(snap)s volume id: %(vol)s")
                                      % {'snap': snap_id,
                                         'vol': vol_id})
-        LOG.debug("SnapInfo :%s" % six.text_type(r.json()['data'][0]))
+        LOG.debug("SnapInfo :%s", r.json()['data'][0])
         return r.json()['data'][0]
 
     def get_snap_info(self, snap_name, vol_name):
@@ -1412,12 +1412,12 @@ class NimbleRestAPIExecutor(object):
         try:
             LOG.debug("data :%s", data)
             self.put(api, data)
-            LOG.debug("Volume %(vol)s is in requested online state :%(flag)s" %
+            LOG.debug("Volume %(vol)s is in requested online state :%(flag)s",
                       {'vol': volume_name,
                        'flag': online_flag})
         except Exception as ex:
             msg = (_("Error %s") % ex)
-            LOG.debug("online_vol_exception: %s" % msg)
+            LOG.debug("online_vol_exception: %s", msg)
             if msg.__contains__("Object is %s" % SM_STATE_MSG):
                 LOG.warning('Volume %(vol)s : %(state)s',
                             {'vol': volume_name,
@@ -1436,11 +1436,10 @@ class NimbleRestAPIExecutor(object):
         try:
             self.put(api, data)
             LOG.debug("Snapshot %(snap)s is in requested online state "
-                      ":%(flag)s" % {
-                          'snap': snap_name,
-                          'flag': online_flag})
+                      ":%(flag)s",
+                      {'snap': snap_name, 'flag': online_flag})
         except Exception as ex:
-            LOG.debug("online_snap_exception: %s" % ex)
+            LOG.debug("online_snap_exception: %s", ex)
             if six.text_type(ex).__contains__("Object %s" % SM_STATE_MSG):
                 LOG.warning('Snapshot %(snap)s :%(state)s',
                             {'snap': snap_name,

View File

@@ -131,10 +131,10 @@ def pure_driver_debug_trace(f):
         method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
                                                    "method": f.__name__}
         backend_name = driver._get_current_array()._backend_id
-        LOG.debug("[%(backend_name)s] Enter %(method_name)s" %
+        LOG.debug("[%(backend_name)s] Enter %(method_name)s",
                   {"method_name": method_name, "backend_name": backend_name})
         result = f(*args, **kwargs)
-        LOG.debug("[%(backend_name)s] Leave %(method_name)s" %
+        LOG.debug("[%(backend_name)s] Leave %(method_name)s",
                   {"method_name": method_name, "backend_name": backend_name})
         return result
@@ -1345,7 +1345,7 @@ class PureBaseVolumeDriver(san.SanDriver):
         current_array = self._get_current_array()
         LOG.debug("Disabling replication for volume %(id)s residing on "
-                  "array %(backend_id)s." %
+                  "array %(backend_id)s.",
                   {"id": volume["id"],
                    "backend_id": current_array._backend_id})
         try:
@@ -1384,10 +1384,9 @@ class PureBaseVolumeDriver(san.SanDriver):
         current_array = self._get_current_array()
         LOG.debug("Failover replication for array %(primary)s to "
-                  "%(secondary)s." % {
-                      "primary": current_array._backend_id,
-                      "secondary": secondary_id
-                  })
+                  "%(secondary)s.",
+                  {"primary": current_array._backend_id,
+                   "secondary": secondary_id})
         if secondary_id == current_array._backend_id:
             raise exception.InvalidReplicationTarget(

View File

@@ -434,8 +434,9 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
         if child_size > parent_size:
             LOG.debug('zfssa.create_volume_from_snapshot: '
-                      'Parent size [%d], Child size [%d] - '
-                      'resizing' % (parent_size, child_size))
+                      'Parent size [%(parent_size)d], '
+                      'Child size [%(child_size)d] - resizing',
+                      {'parent_size': parent_size, 'child_size': child_size})
             self.zfssa.set_lun_props(lcfg.zfssa_pool,
                                      lcfg.zfssa_project,
                                      volume['name'],

View File

@@ -190,7 +190,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             size = snapshot.volume_size
         size = utils.as_int(size)
-        LOG.debug("Validating volume size '%(size)s' using %(functors)s" %
+        LOG.debug("Validating volume size '%(size)s' using %(functors)s",
                   {'size': size,
                    'functors': ", ".join([common.make_pretty_name(func)
                                           for func in validator_functors])})
@@ -274,7 +274,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             LOG.debug("Retrieved volume_type from glance image metadata. "
                       "image_id: %(image_id)s, "
                       "image property: %(image_volume_type)s, "
-                      "volume_type: %(volume_type)s." %
+                      "volume_type: %(volume_type)s.",
                       {'image_id': image_id,
                        'image_volume_type': image_volume_type,
                        'volume_type': volume_type})

View File

@@ -50,7 +50,7 @@ def restore_source_status(context, db, volume_spec):
     source_volid = volume_spec['source_volid']
     source_status = volume_spec['source_volstatus']
     try:
-        LOG.debug('Restoring source %(source_volid)s status to %(status)s' %
+        LOG.debug('Restoring source %(source_volid)s status to %(status)s',
                   {'status': source_status, 'source_volid': source_volid})
         db.volume_update(context, source_volid, {'status': source_status})
     except exception.CinderException:

View File

@@ -1944,7 +1944,7 @@ class VolumeManager(manager.CleanableManager,
             except Exception as err:
                 with excutils.save_and_reraise_exception():
                     LOG.error('Unable to terminate volume connection: '
-                              '%(err)s.' % {'err': err})
+                              '%(err)s.', {'err': err})
 
     def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
         """Copy data from src_vol to dest_vol."""

View File

@@ -64,7 +64,7 @@ def update(context, qos_specs_id, specs):
                   'total_iops_sec': 500,
                   'total_bytes_sec': 512000,}
     """
-    LOG.debug('qos_specs.update(): specs %s' % specs)
+    LOG.debug('qos_specs.update(): specs %s', specs)
     try:
         qos_spec = objects.QualityOfServiceSpecs.get_by_id(context,

View File

@@ -95,7 +95,7 @@ class ISCSITarget(driver.Target):
                    (volume['name']))
             raise exception.InvalidVolume(reason=msg)
-        LOG.debug(("ISCSI Discovery: Found %s") % (location))
+        LOG.debug("ISCSI Discovery: Found %s", location)
         properties['target_discovered'] = True
         results = location.split(" ")

View File

@@ -129,7 +129,8 @@ usedevelop = False
 # E251 unexpected spaces around keyword / parameter equals
 # reason: no improvement in readability
 ignore = E251
-enable-extensions = H106,H203
+# H904 Delay string interpolations at logging calls.
+enable-extensions = H106,H203,H904
 exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build
 max-complexity=30