Merge "Enable H904 check"
commit c7cbf7614e
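H904 is the hacking style check that requires string interpolation to be deferred to the logging call itself: the format string and its arguments are passed to the logger separately instead of being joined with the % operator up front. Deferred interpolation means the formatting work is skipped entirely when the log level is disabled, and tools that aggregate logs can still group records by their unformatted template. A minimal sketch of the pattern this commit applies throughout (the volume_id variable is illustrative):

    import logging

    LOG = logging.getLogger(__name__)
    volume_id = 'vol-001'

    # Flagged by H904: the message is formatted even when DEBUG is off.
    LOG.debug('processing volume %s' % volume_id)

    # Preferred: the logger interpolates only if the record is emitted.
    LOG.debug('processing volume %s', volume_id)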
@@ -55,7 +55,7 @@ class CapabilitiesFilter(filters.BaseBackendFilter):
                 try:
                     cap = cap[scope[index]]
                 except (TypeError, KeyError):
-                    LOG.debug("Backend doesn't provide capability '%(cap)s' " %
+                    LOG.debug("Backend doesn't provide capability '%(cap)s' ",
                               {'cap': scope[index]})
                     return False
 
@@ -52,7 +52,7 @@ class IgnoreAttemptedHostsFilter(filters.BaseBackendFilter):
         pass_msg = "passes" if passes else "fails"
 
         LOG.debug("Backend %(backend)s %(pass_msg)s. Previously tried "
-                  "backends: %(backends)s" % {'backend': backend,
-                                              'pass_msg': pass_msg,
-                                              'backends': backends})
+                  "backends: %(backends)s", {'backend': backend,
+                                             'pass_msg': pass_msg,
+                                             'backends': backends})
         return passes
@@ -925,7 +925,7 @@ class VMAXMasking(object):
             serial_number, storagegroup_name)
         if not masking_list:
             LOG.debug("No masking views associated with storage group "
-                      "%(sg_name)s" % {'sg_name': storagegroup_name})
+                      "%(sg_name)s", {'sg_name': storagegroup_name})
 
         @coordination.synchronized("emc-sg-{sg_name}")
         def do_remove_volume_from_sg(sg_name):
@@ -1150,7 +1150,7 @@ class VMAXMasking(object):
         num_vol_in_sg = self.rest.get_num_vols_in_sg(
             serial_number, storagegroup_name)
         LOG.debug("There are %(num_vol)d volumes remaining in the storage "
-                  "group %(sg_name)s." %
+                  "group %(sg_name)s.",
                   {'num_vol': num_vol_in_sg,
                    'sg_name': storagegroup_name})
 
@@ -1439,8 +1439,8 @@ class HPE3PARCommon(object):
         for license in valid_licenses:
             if license_to_check in license.get('name'):
                 return True
-        LOG.debug(("'%(capability)s' requires a '%(license)s' "
-                   "license which is not installed.") %
+        LOG.debug("'%(capability)s' requires a '%(license)s' "
+                  "license which is not installed.",
                   {'capability': capability,
                    'license': license_to_check})
         return False
@@ -2226,7 +2226,7 @@ class XIVProxy(proxy.IBMStorageProxy):
     def _call_host_define(self, host,
                           chap_name=None, chap_secret=None, domain_name=None):
         """Call host_define using XCLI."""
-        LOG.debug("host_define with domain: %s)" % domain_name)
+        LOG.debug("host_define with domain: %s)", domain_name)
         if domain_name:
             if chap_name:
                 return self._call_xiv_xcli(
@@ -2292,7 +2292,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 
     def _get_pool_domain(self, connector):
         pool_name = self.storage_info[storage.FLAG_KEYS['storage_pool']]
-        LOG.debug("pool name from configuration: %s" % pool_name)
+        LOG.debug("pool name from configuration: %s", pool_name)
         domain = None
         try:
             domain = self._call_xiv_xcli(
@@ -2520,7 +2520,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 
         fc_targets = list(set(target_wwpns))
         fc_targets.sort(key=self._sort_last_digit)
-        LOG.debug("fc_targets : %s" % fc_targets)
+        LOG.debug("fc_targets : %s", fc_targets)
         return fc_targets
 
     def _sort_last_digit(self, a):
@@ -2638,7 +2638,7 @@ class XIVProxy(proxy.IBMStorageProxy):
         certs = certificate.CertificateCollector()
         path = certs.collect_certificate()
         try:
-            LOG.debug('connect_multiendpoint_ssl with: %s' % address)
+            LOG.debug('connect_multiendpoint_ssl with: %s', address)
             xcli = client.XCLIClient.connect_multiendpoint_ssl(
                 user,
                 clear_pass,
@@ -919,7 +919,7 @@ class StorwizeHelpers(object):
         try:
             resp = self.ssh.lshost(host=name)
         except exception.VolumeBackendAPIException as ex:
-            LOG.debug("Exception message: %s" % ex.msg)
+            LOG.debug("Exception message: %s", ex.msg)
             if 'CMMVC5754E' in ex.msg:
                 LOG.debug("CMMVC5754E found in CLI exception.")
                 # CMMVC5754E: The specified object does not exist
@@ -907,7 +907,7 @@ class MStorageVolumeCommon(object):
             if specs['upperreport'] not in ['on', 'off']:
                 LOG.debug('Illegal arguments. '
                           'upperreport is not on or off.'
-                          'upperreport=%s' % specs['upperreport'])
+                          'upperreport=%s', specs['upperreport'])
                 specs['upperreport'] = None
         else:
             specs['upperreport'] = None
@@ -812,8 +812,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
                 LOG.debug(
                     'delete remaining the LD from '
                     'ldset_controller_node. '
-                    'Ldset Name=%s.'
-                    % ldset_controller_node_name)
+                    'Ldset Name=%s.', ldset_controller_node_name)
                 self._cli.delldsetld(ldset_controller_node_name,
                                      ldname)
         # assign the LD to LD Set.
@@ -885,7 +884,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
                 self._properties['ld_name_format']))
 
         if ldname not in lds:
-            LOG.debug('LD `%s` already unbound?' % ldname)
+            LOG.debug('LD `%s` already unbound?', ldname)
             return
 
         ld = lds[ldname]
@@ -644,7 +644,7 @@ class Client(client_base.Client):
         except netapp_api.NaApiError as ex:
             msg = 'Could not delete QOS policy groups. Details: %(ex)s'
             msg_args = {'ex': ex}
-            LOG.debug(msg % msg_args)
+            LOG.debug(msg, msg_args)
 
     def set_lun_qos_policy_group(self, path, qos_policy_group):
         """Sets qos_policy_group on a LUN."""
@@ -605,7 +605,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
                 wwpn = str(initiator['wwpn']).replace(":", "")
                 wwpns_list.append(wwpn)
             LOG.debug("initiator_wwpns=%(initiator)s "
-                      "wwpns_list_from_array=%(wwpns)s" %
+                      "wwpns_list_from_array=%(wwpns)s",
                       {'initiator': initiator_wwpns,
                        'wwpns': wwpns_list})
             if set(initiator_wwpns) == set(wwpns_list):
@@ -737,7 +737,7 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
     def _get_data_ip(self, netconfig):
         """Get data ip."""
         subnet_label = self.configuration.nimble_subnet_label
-        LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s' %
+        LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
                   {'netlabel': subnet_label, 'netconf': netconfig})
         ret_data_ip = ''
         for subnet in netconfig['array_list'][0]['nic_list']:
@@ -821,7 +821,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
 
     def _build_initiator_target_map(self, target_wwns, connector):
         """Build the target_wwns and the initiator target map."""
-        LOG.debug("_build_initiator_target_map for %(wwns)s" %
+        LOG.debug("_build_initiator_target_map for %(wwns)s",
                   {'wwns': target_wwns})
         init_targ_map = {}
 
@@ -912,7 +912,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
             raise NimbleDriverException(
                 _('No initiator group found for initiator %s') %
                 initiator_name)
-        LOG.debug("initiator_target_map %s" % init_targ_map)
+        LOG.debug("initiator_target_map %s", init_targ_map)
         self.APIExecutor.remove_acl(volume, initiator_group_name)
         eventlet.sleep(DEFAULT_SLEEP)
         # FIXME to check for other volumes attached to the host and then
@@ -925,7 +925,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
 
     def get_wwpns_from_array(self, array_name):
         """Retrieve the wwpns from the array"""
-        LOG.debug("get_wwpns_from_array %s" % array_name)
+        LOG.debug("get_wwpns_from_array %s", array_name)
         target_wwpns = []
         interface_info = self.APIExecutor.get_fc_interface_list(array_name)
         LOG.info("interface_info %(interface_info)s",
@@ -1029,12 +1029,12 @@ class NimbleRestAPIExecutor(object):
     def get_performance_policy_id(self, perf_policy_name):
         api = 'performance_policies/'
         filter = {'name': perf_policy_name}
-        LOG.debug("Perfomance policy Name %s" % perf_policy_name)
+        LOG.debug("Performance policy Name %s", perf_policy_name)
         r = self.get_query(api, filter)
         if not r.json()['data']:
             raise NimbleAPIException(_("No performance policy found for:"
                                        "%(perf)s") % {'perf': perf_policy_name})
-        LOG.debug("Performance policy ID :%(perf)s" %
+        LOG.debug("Performance policy ID :%(perf)s",
                   {'perf': r.json()['data'][0]['id']})
         return r.json()['data'][0]['id']
 
@@ -1381,7 +1381,7 @@ class NimbleRestAPIExecutor(object):
                                        "snap_id: %(snap)s volume id: %(vol)s")
                                      % {'snap': snap_id,
                                         'vol': vol_id})
-        LOG.debug("SnapInfo :%s" % six.text_type(r.json()['data'][0]))
+        LOG.debug("SnapInfo :%s", r.json()['data'][0])
         return r.json()['data'][0]
 
     def get_snap_info(self, snap_name, vol_name):
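Deferring also makes the six.text_type() wrapper in the hunk above unnecessary: %s formatting stringifies its argument when the record is rendered, so the raw object can be handed to the logger as-is. A small sketch (the payload dict is illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    # str() is applied by %s formatting only when the record is emitted.
    LOG.debug("SnapInfo :%s", {'id': 42, 'name': 'snap1'})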
@@ -1413,12 +1413,12 @@ class NimbleRestAPIExecutor(object):
         try:
             LOG.debug("data :%s", data)
             self.put(api, data)
-            LOG.debug("Volume %(vol)s is in requested online state :%(flag)s" %
+            LOG.debug("Volume %(vol)s is in requested online state :%(flag)s",
                       {'vol': volume_name,
                        'flag': online_flag})
         except Exception as ex:
             msg = (_("Error %s") % ex)
-            LOG.debug("online_vol_exception: %s" % msg)
+            LOG.debug("online_vol_exception: %s", msg)
             if msg.__contains__("Object is %s" % SM_STATE_MSG):
                 LOG.warning('Volume %(vol)s : %(state)s',
                             {'vol': volume_name,
@@ -1437,11 +1437,10 @@ class NimbleRestAPIExecutor(object):
         try:
             self.put(api, data)
             LOG.debug("Snapshot %(snap)s is in requested online state "
-                      ":%(flag)s" % {
-                          'snap': snap_name,
-                          'flag': online_flag})
+                      ":%(flag)s",
+                      {'snap': snap_name, 'flag': online_flag})
         except Exception as ex:
-            LOG.debug("online_snap_exception: %s" % ex)
+            LOG.debug("online_snap_exception: %s", ex)
             if six.text_type(ex).__contains__("Object %s" % SM_STATE_MSG):
                 LOG.warning('Snapshot %(snap)s :%(state)s',
                             {'snap': snap_name,
@@ -132,10 +132,10 @@ def pure_driver_debug_trace(f):
         method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
                                                    "method": f.__name__}
         backend_name = driver._get_current_array()._backend_id
-        LOG.debug("[%(backend_name)s] Enter %(method_name)s" %
+        LOG.debug("[%(backend_name)s] Enter %(method_name)s",
                   {"method_name": method_name, "backend_name": backend_name})
         result = f(*args, **kwargs)
-        LOG.debug("[%(backend_name)s] Leave %(method_name)s" %
+        LOG.debug("[%(backend_name)s] Leave %(method_name)s",
                   {"method_name": method_name, "backend_name": backend_name})
         return result
 
@@ -1346,7 +1346,7 @@ class PureBaseVolumeDriver(san.SanDriver):
 
         current_array = self._get_current_array()
         LOG.debug("Disabling replication for volume %(id)s residing on "
-                  "array %(backend_id)s." %
+                  "array %(backend_id)s.",
                   {"id": volume["id"],
                    "backend_id": current_array._backend_id})
         try:
@@ -1385,10 +1385,9 @@ class PureBaseVolumeDriver(san.SanDriver):
 
         current_array = self._get_current_array()
         LOG.debug("Failover replication for array %(primary)s to "
-                  "%(secondary)s." % {
-                      "primary": current_array._backend_id,
-                      "secondary": secondary_id
-                  })
+                  "%(secondary)s.",
+                  {"primary": current_array._backend_id,
+                   "secondary": secondary_id})
 
         if secondary_id == current_array._backend_id:
             raise exception.InvalidReplicationTarget(
@@ -435,8 +435,9 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
 
         if child_size > parent_size:
             LOG.debug('zfssa.create_volume_from_snapshot: '
-                      'Parent size [%d], Child size [%d] - '
-                      'resizing' % (parent_size, child_size))
+                      'Parent size [%(parent_size)d], '
+                      'Child size [%(child_size)d] - resizing',
+                      {'parent_size': parent_size, 'child_size': child_size})
             self.zfssa.set_lun_props(lcfg.zfssa_pool,
                                      lcfg.zfssa_project,
                                      volume['name'],
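Besides deferring the interpolation, the hunk above also converts positional %d formatting with a tuple into named placeholders fed by a dict. Both forms defer correctly: the stdlib logger applies either a tuple of positional args or a single dict of named args to the template when the record is emitted. A small sketch (the sizes are illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    # Positional: args are applied as a tuple.
    LOG.debug('Parent size [%d], Child size [%d]', 10, 20)

    # Named: a single dict argument serves as the mapping.
    LOG.debug('Parent size [%(parent)d], Child size [%(child)d]',
              {'parent': 10, 'child': 20})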
@@ -190,7 +190,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             size = snapshot.volume_size
 
         size = utils.as_int(size)
-        LOG.debug("Validating volume size '%(size)s' using %(functors)s" %
+        LOG.debug("Validating volume size '%(size)s' using %(functors)s",
                   {'size': size,
                    'functors': ", ".join([common.make_pretty_name(func)
                                           for func in validator_functors])})
@@ -274,7 +274,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             LOG.debug("Retrieved volume_type from glance image metadata. "
                       "image_id: %(image_id)s, "
                       "image property: %(image_volume_type)s, "
-                      "volume_type: %(volume_type)s." %
+                      "volume_type: %(volume_type)s.",
                       {'image_id': image_id,
                        'image_volume_type': image_volume_type,
                        'volume_type': volume_type})
@@ -50,7 +50,7 @@ def restore_source_status(context, db, volume_spec):
     source_volid = volume_spec['source_volid']
     source_status = volume_spec['source_volstatus']
     try:
-        LOG.debug('Restoring source %(source_volid)s status to %(status)s' %
+        LOG.debug('Restoring source %(source_volid)s status to %(status)s',
                   {'status': source_status, 'source_volid': source_volid})
         db.volume_update(context, source_volid, {'status': source_status})
     except exception.CinderException:
|
@ -1950,7 +1950,7 @@ class VolumeManager(manager.CleanableManager,
|
||||
except Exception as err:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error('Unable to terminate volume connection: '
|
||||
'%(err)s.' % {'err': err})
|
||||
'%(err)s.', {'err': err})
|
||||
|
||||
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
|
||||
"""Copy data from src_vol to dest_vol."""
|
||||
|
@@ -64,7 +64,7 @@ def update(context, qos_specs_id, specs):
                  'total_iops_sec': 500,
                  'total_bytes_sec': 512000,}
     """
-    LOG.debug('qos_specs.update(): specs %s' % specs)
+    LOG.debug('qos_specs.update(): specs %s', specs)
 
     try:
         qos_spec = objects.QualityOfServiceSpecs.get_by_id(context,
@@ -95,7 +95,7 @@ class ISCSITarget(driver.Target):
                    (volume['name']))
             raise exception.InvalidVolume(reason=msg)
 
-        LOG.debug(("ISCSI Discovery: Found %s") % (location))
+        LOG.debug("ISCSI Discovery: Found %s", location)
         properties['target_discovered'] = True
 
         results = location.split(" ")
tox.ini | 3 ++-
@@ -129,7 +129,8 @@ usedevelop = False
 # E251 unexpected spaces around keyword / parameter equals
 # reason: no improvement in readability
 ignore = E251
-enable-extensions = H106,H203
+# H904 Delay string interpolations at logging calls.
+enable-extensions = H106,H203,H904
 exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build
 max-complexity=30
 
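With the extension enabled, flake8 reads enable-extensions from the [flake8] section of tox.ini and reports an H904 violation wherever a logging call pre-formats its message. Assuming a standard OpenStack checkout with the hacking plugin installed, the check runs as part of the usual style gate:

    tox -e pep8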