diff --git a/cinder/backup/api.py b/cinder/backup/api.py
index bcb4308d184..500aad16078 100644
--- a/cinder/backup/api.py
+++ b/cinder/backup/api.py
@@ -182,8 +182,7 @@ class API(base.Base):
         idx = 0
         while idx < len(services):
             srv = services[idx]
-            if(self._az_matched(srv, availability_zone) and
-               srv.is_up):
+            if (self._az_matched(srv, availability_zone) and srv.is_up):
                 return srv.host
             idx = idx + 1
         return None
diff --git a/cinder/cmd/status.py b/cinder/cmd/status.py
index 67c85bae491..49b44f84da3 100644
--- a/cinder/cmd/status.py
+++ b/cinder/cmd/status.py
@@ -287,9 +287,9 @@ def main():
     try:
         return uc.main(CONF, 'cinder', Checks())
     except cfg.ConfigDirNotFoundError:
-        return('ERROR: cannot read the cinder configuration directory.\n'
-               'Please re-run using the --config-dir option '
-               'with a valid cinder configuration directory.')
+        return ('ERROR: cannot read the cinder configuration directory.\n'
+                'Please re-run using the --config-dir option '
+                'with a valid cinder configuration directory.')
 
 
 if __name__ == '__main__':
diff --git a/cinder/image/accelerator.py b/cinder/image/accelerator.py
index bf4137932b7..a6539162df6 100644
--- a/cinder/image/accelerator.py
+++ b/cinder/image/accelerator.py
@@ -53,7 +53,7 @@ class ImageAccel(object):
         self.src = src
         self.dest = dest
         self.compression_format = CONF.compression_format
-        if(self.compression_format == 'gzip'):
+        if (self.compression_format == 'gzip'):
             self._accel_engine_path = _ACCEL_PATH_PREFERENCE_ORDER_LIST
         else:
             self._accel_engine_path = None
diff --git a/cinder/scheduler/evaluator/evaluator.py b/cinder/scheduler/evaluator/evaluator.py
index 4b686932154..efc84fe6bb9 100644
--- a/cinder/scheduler/evaluator/evaluator.py
+++ b/cinder/scheduler/evaluator/evaluator.py
@@ -32,7 +32,7 @@ def _operatorOperands(tokenList):
         try:
             op1 = next(it)
             op2 = next(it)
-            yield(op1, op2)
+            yield (op1, op2)
         except StopIteration:
             break
 
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index a71f4708dee..031beb5c2d1 100644
--- a/cinder/scheduler/host_manager.py
+++ b/cinder/scheduler/host_manager.py
@@ -313,11 +313,11 @@ class BackendState(object):
             pool_cap['timestamp'] = self.updated
 
         self.capabilities = typing.cast(ReadOnlyDict, self.capabilities)
-        if('filter_function' not in pool_cap and
+        if ('filter_function' not in pool_cap and
            'filter_function' in self.capabilities):
             pool_cap['filter_function'] = self.capabilities['filter_function']
 
-        if('goodness_function' not in pool_cap and
+        if ('goodness_function' not in pool_cap and
            'goodness_function' in self.capabilities):
             pool_cap['goodness_function'] = (
                 self.capabilities['goodness_function'])
diff --git a/cinder/ssh_utils.py b/cinder/ssh_utils.py
index bef393e28a5..341440cdead 100644
--- a/cinder/ssh_utils.py
+++ b/cinder/ssh_utils.py
@@ -115,11 +115,11 @@ class SSHPool(pools.Pool):
         self.resize(1)
         # release all but the last connection using
         # get and put to allow any get waiters to complete.
-        while(self.waiting() or self.current_size > 1):
+        while (self.waiting() or self.current_size > 1):
             conn = self.get()
             self.put(conn)
         # Now free everthing that is left
-        while(self.free_items):
+        while (self.free_items):
             self.free_items.popleft().close()
             self.current_size -= 1
 
diff --git a/cinder/tests/hacking/checks.py b/cinder/tests/hacking/checks.py
index 84bfe5653a0..c9f5fc0bbda 100644
--- a/cinder/tests/hacking/checks.py
+++ b/cinder/tests/hacking/checks.py
@@ -115,7 +115,7 @@ def no_translate_logs(logical_line, filename):
     C312
     """
     if translated_log.match(logical_line):
-        yield(0, "C312: Log messages should not be translated!")
+        yield (0, "C312: Log messages should not be translated!")
 
 
 @core.flake8ext
@@ -145,7 +145,7 @@ def check_explicit_underscore_import(logical_line, filename):
             custom_underscore_check.match(logical_line)):
         UNDERSCORE_IMPORT_FILES.append(filename)
     elif string_translation.match(logical_line):
-        yield(0, "N323: Found use of _() without explicit import of _ !")
+        yield (0, "N323: Found use of _() without explicit import of _ !")
 
 
 class CheckLoggingFormatArgs(BaseASTChecker):
@@ -305,7 +305,7 @@ def check_datetime_now(logical_line, noqa):
     msg = ("C301: Found datetime.now(). "
           "Please use timeutils.utcnow() from oslo_utils.")
    if 'datetime.now' in logical_line:
-        yield(0, msg)
+        yield (0, msg)
 
 
 @core.flake8ext
@@ -322,7 +322,7 @@ def check_no_print_statements(logical_line, filename, noqa):
         msg = ("C303: print() should not be used. "
                "Please use LOG.[info|error|warning|exception|debug]. "
                "If print() must be used, use '# noqa' to skip this check.")
-        yield(0, msg)
+        yield (0, msg)
 
 
 @core.flake8ext
@@ -354,7 +354,7 @@ def validate_assertTrue(logical_line, filename):
     if re.match(assert_True, logical_line):
         msg = ("C313: Unit tests should use assertTrue(value) instead"
                " of using assertEqual(True, value).")
-        yield(0, msg)
+        yield (0, msg)
 
 
 third_party_mock = re.compile("^import.mock")
@@ -369,7 +369,7 @@ def no_third_party_mock(logical_line):
            re.match(from_third_party_mock, logical_line)):
        msg = ('C337: Unit tests should use the standard library "mock" '
               'module, not the third party mock lib.')
-        yield(0, msg)
+        yield (0, msg)
 
 
 @core.flake8ext
diff --git a/cinder/tests/unit/brick/test_brick_lvm.py b/cinder/tests/unit/brick/test_brick_lvm.py
index b4605446b58..2b89b84ba6d 100644
--- a/cinder/tests/unit/brick/test_brick_lvm.py
+++ b/cinder/tests/unit/brick/test_brick_lvm.py
@@ -73,12 +73,12 @@ class BrickLvmTestCase(test.TestCase):
             data = " fake-vg\n"
         elif _lvm_prefix + 'lvm version' in cmd_string:
             data = " LVM version: 2.03.07(2) (2019-11-30)\n"
-        elif(_lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in
-             cmd_string):
+        elif (_lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in
+                cmd_string):
             data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
-        elif(_lvm_prefix + 'vgs --noheadings --unit=g '
-             '-o name,size,free,lv_count,uuid '
-             '--separator : --nosuffix' in cmd_string):
+        elif (_lvm_prefix + 'vgs --noheadings --unit=g '
+                '-o name,size,free,lv_count,uuid '
+                '--separator : --nosuffix' in cmd_string):
             data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
                     "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
             if 'test-prov-cap-vg-unit' in cmd_string:
diff --git a/cinder/tests/unit/test_image_utils.py b/cinder/tests/unit/test_image_utils.py
index 164d58b4035..b399556d608 100644
--- a/cinder/tests/unit/test_image_utils.py
+++ b/cinder/tests/unit/test_image_utils.py
@@ -2245,7 +2245,7 @@ class TestVmdkImageChecks(test.TestCase):
     def test_check_vmdk_image_handles_missing_info(self):
         expected = 'Unable to determine VMDK createType'
         # remove create-type
-        del(self.qdata_data['create-type'])
+        del (self.qdata_data['create-type'])
         iue = self.assertRaises(exception.ImageUnacceptable,
                                 image_utils.check_vmdk_image,
                                 fake.IMAGE_ID,
@@ -2253,7 +2253,7 @@
         self.assertIn(expected, str(iue))
 
         # remove entire data section
-        del(self.qdata_data)
+        del (self.qdata_data)
         iue = self.assertRaises(exception.ImageUnacceptable,
                                 image_utils.check_vmdk_image,
                                 fake.IMAGE_ID,
diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
index 04f8754e43b..2458694417c 100644
--- a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
+++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
@@ -5360,7 +5360,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
         conf = self._set_unique_fqdn_override(False, in_shared)
 
         my_connector = self.connector.copy()
-        del(my_connector['initiator'])
+        del (my_connector['initiator'])
         my_connector['host'] = "abc123abc123abc123abc123abc123abc123"
         safe_host = common._safe_hostname(my_connector, conf)
         self.assertEqual(fixed_hostname, safe_host)
diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py
index 0733066fe95..634f20c4944 100644
--- a/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py
+++ b/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py
@@ -209,11 +209,11 @@ class IBMStorageFakeProxyDriver(object):
 
         # Delete snapshots in group
         self.snapshots = {k: snap for k, snap in self.snapshots.items()
-                          if not(snap.get('group_id') == group.get('id'))}
+                          if not (snap.get('group_id') == group.get('id'))}
 
         # Delete volume in group
         self.volumes = {k: vol for k, vol in self.volumes.items()
-                        if not(vol.get('group_id') == group.get('id'))}
+                        if not (vol.get('group_id') == group.get('id'))}
 
         return {'status': fields.GroupStatus.DELETED}, volumes
 
@@ -256,8 +256,8 @@ class IBMStorageFakeProxyDriver(object):
 
         # Delete snapshots in group
         self.snapshots = {k: snap for k, snap in self.snapshots.items()
-                          if not(snap.get('group_id')
-                                 == group_snapshot.get('group_snapshot_id'))}
+                          if not (snap.get('group_id')
+                                  == group_snapshot.get('group_snapshot_id'))}
 
         return {'status': 'deleted'}, updated_snapshots
 
@@ -310,7 +310,7 @@ class IBMStorageFakeProxyDriver(object):
                 volume_update_list)
 
     def get_replication_error_status(self, context, groups):
-        return(
+        return (
             [{'group_id': groups[0]['id'],
              'replication_status': fields.ReplicationStatus.ERROR}],
            [{'volume_id': VOLUME['id'],
diff --git a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
index 7468e336e4a..3af22c8aa95 100644
--- a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
+++ b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
@@ -800,8 +800,7 @@ port_speed!N/A
                 value1 = filter1.split('=')[1]
                 value2 = filter2.split('=')[1]
                 for v in ports:
-                    if(str(v[5]) == value1 and str(
-                            v[7]) == value2):
+                    if (str(v[5]) == value1 and str(v[7]) == value2):
                         rows.append(v)
             else:
                 value = kwargs['filtervalue'].split('=')[1]
@@ -1645,8 +1644,8 @@ port_speed!N/A
 
         self._fcmappings_list[fcmap_info['id']] = fcmap_info
 
-        return('FlashCopy Mapping, id [' + fcmap_info['id'] +
-               '], successfully created', '')
+        return ('FlashCopy Mapping, id [' + fcmap_info['id'] +
+                '], successfully created', '')
 
     def _cmd_prestartfcmap(self, **kwargs):
         if 'obj' not in kwargs:
@@ -1801,8 +1800,8 @@ port_speed!N/A
 
         self._fcconsistgrp_list[fcconsistgrp_info['id']] = fcconsistgrp_info
 
-        return('FlashCopy Consistency Group, id [' + fcconsistgrp_info['id'] +
-               '], successfully created', '')
+        return ('FlashCopy Consistency Group, id [' + fcconsistgrp_info['id'] +
+                '], successfully created', '')
 
     def _cmd_prestartfcconsistgrp(self, **kwargs):
         if 'obj' not in kwargs:
@@ -2225,8 +2224,8 @@ port_speed!N/A
         self._volumes_list[master_vol]['RC_id'] = rcrel_info['id']
         self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name']
         self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id']
-        return('RC Relationship, id [' + rcrel_info['id'] +
-               '], successfully created', '')
+        return ('RC Relationship, id [' + rcrel_info['id'] +
+                '], successfully created', '')
 
     def _cmd_lsrcrelationship(self, **kwargs):
         rows = []
@@ -2791,8 +2790,8 @@ port_speed!N/A
             rccg_info['cycle_period_seconds'] = '300'
         self._rcconsistgrp_list[rccg_info['name']] = rccg_info
 
-        return('RC Consistency Group, id [' + rccg_info['id'] +
-               '], successfully created', '')
+        return ('RC Consistency Group, id [' + rccg_info['id'] +
+                '], successfully created', '')
 
     def _cmd_lsrcconsistgrp(self, **kwargs):
         rows = []
@@ -3009,7 +3008,7 @@ port_speed!N/A
 
         partner_info['partnership'] = 'fully_configured'
         self._partnership_list[partner_info['id']] = partner_info
-        return('', '')
+        return ('', '')
 
     def _cmd_mkfcpartnership(self, **kwargs):
         if 'obj' not in kwargs:
@@ -3036,7 +3035,7 @@ port_speed!N/A
         partner_info['backgroundcopyrate'] = copyrate
         partner_info['partnership'] = 'fully_configured'
         self._partnership_list[partner_info['id']] = partner_info
-        return('', '')
+        return ('', '')
 
     def _cmd_chpartnership(self, **kwargs):
         if 'obj' not in kwargs:
@@ -3048,7 +3047,7 @@ port_speed!N/A
         partner_state = ('fully_configured' if 'start' in kwargs
                          else 'fully_configured_stopped')
         self._partnership_list[peer_sys]['partnership'] = partner_state
-        return('', '')
+        return ('', '')
 
     # The main function to run commands on the management simulator
     def execute_command(self, cmd, check_exit_code=True):
@@ -10936,7 +10935,7 @@ class StorwizeHelpersTestCase(test.TestCase):
                    'rc_controlled': 'no',
                    'source_vdisk_name': 'testvol'}
         get_relationship_info.return_value = None
-        if(fc_data['copy_rate'] != '0' and fc_data['progress'] == '100'
+        if (fc_data['copy_rate'] != '0' and fc_data['progress'] == '100'
                 and fc_data['status'] == 'copying'):
             (self.assertRaises(loopingcall.LoopingCallDone,
              self.storwize_svc_common._check_vdisk_fc_mappings, vol, True,
@@ -10955,7 +10954,7 @@ class StorwizeHelpersTestCase(test.TestCase):
             self.assertEqual(1, get_fc_mapping_attributes.call_count)
             self.assertEqual(0, rmfcmap.call_count)
 
-        if(fc_data['copy_rate'] == '0' and fc_data['progress'] == '0'
+        if (fc_data['copy_rate'] == '0' and fc_data['progress'] == '0'
                 and fc_data['status'] in ['copying', 'idle_or_copied']):
             chfcmap.assert_called_with('4', copyrate='50', autodel='on')
             self.assertEqual(1, chfcmap.call_count)
@@ -10995,8 +10994,8 @@ class StorwizeHelpersTestCase(test.TestCase):
                    'source_vdisk_name': 'testvol'}
         rel_info = {'name': 'rcrel232'}
         get_relationship_info.return_value = rel_info
-        if(fc_data['copy_rate'] != '0' and fc_data['progress'] == '100'
-           and fc_data['status'] == 'copying'):
+        if (fc_data['copy_rate'] != '0' and fc_data['progress'] == '100'
+                and fc_data['status'] == 'copying'):
             (self.assertRaises(loopingcall.LoopingCallDone,
              self.storwize_svc_common._check_vdisk_fc_mappings, vol, True,
              False, rel_info))
@@ -11014,7 +11013,7 @@ class StorwizeHelpersTestCase(test.TestCase):
             self.assertEqual(1, get_fc_mapping_attributes.call_count)
             self.assertEqual(0, rmfcmap.call_count)
 
-        if(fc_data['copy_rate'] == '0' and fc_data['progress'] == '0'
+        if (fc_data['copy_rate'] == '0' and fc_data['progress'] == '0'
                 and fc_data['status'] in ['copying', 'idle_or_copied']):
             chfcmap.assert_called_with('4', copyrate='50', autodel='on')
             self.assertEqual(1, chfcmap.call_count)
diff --git a/cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py b/cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py
index c468194e171..e78b5130230 100644
--- a/cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py
+++ b/cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py
@@ -1602,8 +1602,8 @@ port_speed!N/A
 
         self._lcmappings_list[lcmap_info['id']] = lcmap_info
 
-        return('LocalCopy Mapping, id [' + lcmap_info['id'] +
-               '], successfully created', '')
+        return ('LocalCopy Mapping, id [' + lcmap_info['id'] +
+                '], successfully created', '')
 
     def _cmd_prestartlcmap(self, **kwargs):
         if 'obj' not in kwargs:
@@ -1705,8 +1705,8 @@ port_speed!N/A
 
         self._lcconsistgrp_list[lcconsistgrp_info['id']] = lcconsistgrp_info
 
-        return('LocalCopy Consistency Group, id [' + lcconsistgrp_info['id'] +
-               '], successfully created', '')
+        return ('LocalCopy Consistency Group, id [' + lcconsistgrp_info['id'] +
+                '], successfully created', '')
 
     def _cmd_prestartlcconsistgrp(self, **kwargs):
         if 'obj' not in kwargs:
@@ -1998,8 +1998,8 @@ port_speed!N/A
         self._volumes_list[master_vol]['RC_id'] = rcrel_info['id']
         self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name']
         self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id']
-        return('RC Relationship, id [' + rcrel_info['id'] +
-               '], successfully created', '')
+        return ('RC Relationship, id [' + rcrel_info['id'] +
+                '], successfully created', '')
 
     def _cmd_startrcrelationship(self, **kwargs):
         if 'obj' not in kwargs:
@@ -2131,7 +2131,7 @@ port_speed!N/A
 
         partner_info['partnership'] = 'fully_configured'
         self._partnership_list[partner_info['id']] = partner_info
-        return('', '')
+        return ('', '')
 
     def _cmd_mkfcpartnership(self, **kwargs):
         if 'obj' not in kwargs:
@@ -2158,7 +2158,7 @@ port_speed!N/A
         partner_info['backgroundcopyrate'] = copyrate
         partner_info['partnership'] = 'fully_configured'
         self._partnership_list[partner_info['id']] = partner_info
-        return('', '')
+        return ('', '')
 
     def _cmd_chpartnership(self, **kwargs):
         if 'obj' not in kwargs:
@@ -2170,7 +2170,7 @@ port_speed!N/A
         partner_state = ('fully_configured' if 'start' in kwargs
                          else 'fully_configured_stopped')
         self._partnership_list[peer_sys]['partnership'] = partner_state
-        return('', '')
+        return ('', '')
 
     # The main function to run commands on the management simulator
     def execute_command(self, cmd, check_exit_code=True):
diff --git a/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py b/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py
index 35fddb1bbee..3f4130bfdc5 100644
--- a/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py
+++ b/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py
@@ -202,7 +202,7 @@ class DBMock(object):
         return httpstatus.OK, vol
 
     def update_volume(self, **kwargs):
-        assert("project_name" in kwargs and kwargs["project_name"]), \
+        assert ("project_name" in kwargs and kwargs["project_name"]), \
             "project_name must be provided"
 
     def create_snapshot(self, snapshot) -> Tuple[int, Dict]:
diff --git a/cinder/tests/unit/volume/drivers/netapp/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/test_utils.py
index 2d0ef346afb..3bd316c2847 100644
--- a/cinder/tests/unit/volume/drivers/netapp/test_utils.py
+++ b/cinder/tests/unit/volume/drivers/netapp/test_utils.py
@@ -723,7 +723,7 @@ class NetAppDriverUtilsTestCase(test.TestCase):
 
     def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self):
         volume_type = copy.deepcopy(fake.VOLUME_TYPE)
-        del(volume_type['qos_specs_id'])
+        del (volume_type['qos_specs_id'])
         mock_get_context = self.mock_object(context, 'get_admin_context')
 
         result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
index fa629599a2d..8ca3ac59d9a 100644
--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
@@ -1348,7 +1348,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
     def test_get_flow(self, is_migration_target, use_quota, flow_mock,
                       extract_ref_mock, onfailure_mock, extract_spec_mock,
                       notify_mock, create_mock, onfinish_mock, load_mock):
-        assert(isinstance(is_migration_target, bool))
+        assert (isinstance(is_migration_target, bool))
         filter_properties = {'retry': mock.sentinel.retry}
         tasks = [mock.call(extract_ref_mock.return_value),
                  mock.call(onfailure_mock.return_value),
diff --git a/cinder/utils.py b/cinder/utils.py
index 823bd22c81c..a4e5e2e40d5 100644
--- a/cinder/utils.py
+++ b/cinder/utils.py
@@ -576,7 +576,7 @@ def check_string_length(value: str, name: str, min_length: int = 0,
         strutils.check_string_length(value, name=name,
                                      min_length=min_length,
                                      max_length=max_length)
-    except(ValueError, TypeError) as exc:
+    except (ValueError, TypeError) as exc:
         raise exception.InvalidInput(reason=exc)
 
     if not allow_all_spaces and value.isspace():
diff --git a/cinder/volume/drivers/datacore/driver.py b/cinder/volume/drivers/datacore/driver.py
index 0cb38b56a42..ca77d808872 100644
--- a/cinder/volume/drivers/datacore/driver.py
+++ b/cinder/volume/drivers/datacore/driver.py
@@ -411,7 +411,7 @@ class DataCoreVolumeDriver(driver.VolumeDriver):
             kwargs = {'existing_ref': vd_alias,
                       'reason': 'Specified Virtual disk does not exist.'}
             raise cinder_exception.ManageExistingInvalidReference(**kwargs)
-        return(self._get_size_in_gigabytes(virtual_disk.Size.Value))
+        return (self._get_size_in_gigabytes(virtual_disk.Size.Value))
 
     def _update_volume_stats(self):
         performance_data = self._api.get_performance_by_type(
diff --git a/cinder/volume/drivers/hedvig/rest_client.py b/cinder/volume/drivers/hedvig/rest_client.py
index 688955a638d..dfa421c6d46 100644
--- a/cinder/volume/drivers/hedvig/rest_client.py
+++ b/cinder/volume/drivers/hedvig/rest_client.py
@@ -382,7 +382,7 @@ class RestClient(object):
 
         if obj['status'] != 'ok':
             msg = "is not mapped to the specified controller"
-            if(msg not in obj['message']):
+            if (msg not in obj['message']):
                 errmsg = _('REST call status - %s') % obj['status']
                 raise exception.VolumeDriverException(errmsg)
 
diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py
index 9094f7e8936..737d97a5a3f 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_common.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_common.py
@@ -1578,7 +1578,7 @@ class HPE3PARCommon(object):
         """We have to use a safe hostname length for 3PAR host names."""
         hostname = connector['host']
         unique_fqdn_network = configuration.unique_fqdn_network
-        if(not unique_fqdn_network and connector.get('initiator')):
+        if (not unique_fqdn_network and connector.get('initiator')):
             iqn = connector.get('initiator')
             iqn = iqn.replace(":", "-")
             return iqn[::-1][:31]
diff --git a/cinder/volume/drivers/hpe/nimble.py b/cinder/volume/drivers/hpe/nimble.py
index d25b90c765a..4f15caf8b65 100644
--- a/cinder/volume/drivers/hpe/nimble.py
+++ b/cinder/volume/drivers/hpe/nimble.py
@@ -286,7 +286,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
                                    self._group_target_enabled,
                                    self._storage_protocol,
                                    pool_name)
-        if(volume['size'] > snapshot['volume_size']):
+        if (volume['size'] > snapshot['volume_size']):
             vol_size = volume['size'] * units.Ki
             reserve_size = 100 if reserve else 0
             data = {"data": {'size': vol_size,
diff --git a/cinder/volume/drivers/ibm/gpfs.py b/cinder/volume/drivers/ibm/gpfs.py
index 792b15dfa63..0821e73f48d 100644
--- a/cinder/volume/drivers/ibm/gpfs.py
+++ b/cinder/volume/drivers/ibm/gpfs.py
@@ -436,13 +436,13 @@ class GPFSDriver(driver.CloneableImageVD,
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
 
-        if(self.configuration.gpfs_images_share_mode and
+        if (self.configuration.gpfs_images_share_mode and
            self.configuration.gpfs_images_dir is None):
             msg = _('Option gpfs_images_dir is not set correctly.')
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
 
-        if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
+        if (self.configuration.gpfs_images_share_mode == 'copy_on_write' and
            not self._same_filesystem(self.configuration.gpfs_mount_point_base,
                                      self.configuration.gpfs_images_dir)):
             msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
@@ -453,7 +453,7 @@ class GPFSDriver(driver.CloneableImageVD,
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
 
-        if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
+        if (self.configuration.gpfs_images_share_mode == 'copy_on_write' and
           not self._is_same_fileset(self.configuration.gpfs_mount_point_base,
                                     self.configuration.gpfs_images_dir)):
            msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
@@ -884,8 +884,8 @@ class GPFSDriver(driver.CloneableImageVD,
 
     def _is_cloneable(self, image_id):
         """Return true if the specified image can be cloned by GPFS."""
-        if not((self.configuration.gpfs_images_dir and
-                self.configuration.gpfs_images_share_mode)):
+        if not ((self.configuration.gpfs_images_dir and
+                 self.configuration.gpfs_images_share_mode)):
             reason = 'glance repository not configured to use GPFS'
             return False, reason, None
 
diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
index 86f3eb3073c..c5c6ef792de 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
@@ -4439,7 +4439,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
             if 'IOThrottle_rate' in model_update['metadata']:
                 del model_update['metadata']['IOThrottle_rate']
         model_update['host'] = volume['host']
-        return(model_update)
+        return (model_update)
 
     def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False):
         return self._helpers.add_vdisk_copy(volume, dest_pool,
diff --git a/cinder/volume/drivers/nexenta/iscsi.py b/cinder/volume/drivers/nexenta/iscsi.py
index 19608e37b50..c0e630c976a 100644
--- a/cinder/volume/drivers/nexenta/iscsi.py
+++ b/cinder/volume/drivers/nexenta/iscsi.py
@@ -177,11 +177,11 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
         target_names = self.targets.keys()
         if provider_location:
             target_name = provider_location.split(',1 ')[1].split(' ')[0]
-            if not(self.targets.get(target_name)):
+            if not (self.targets.get(target_name)):
                 self.targets[target_name] = []
-            if not(volume['name'] in self.targets[target_name]):
+            if not (volume['name'] in self.targets[target_name]):
                 self.targets[target_name].append(volume['name'])
-        elif not(target_names):
+        elif not (target_names):
             # create first target and target group
             target_name = self._create_target(0)
             self.targets[target_name].append(volume['name'])
@@ -193,7 +193,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
             if len(self.targets[target_name]) >= 20:
                 # create new target and target group
                 target_name = self._create_target(len(target_names))
-            if not(volume['name'] in self.targets[target_name]):
+            if not (volume['name'] in self.targets[target_name]):
                 self.targets[target_name].append(volume['name'])
         return target_name
 
diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py
index b2a37a8c8fa..b56ef52e180 100644
--- a/cinder/volume/drivers/nfs.py
+++ b/cinder/volume/drivers/nfs.py
@@ -177,7 +177,7 @@ class NfsDriver(remotefs.RemoteFSSnapDriverDistributed):
         # If both nas_host and nas_share_path are set we are not
         # going to use the nfs_shares_config file. So, only check
         # for its existence if it is going to be used.
-        if((not nas_host) or (not nas_share_path)):
+        if ((not nas_host) or (not nas_share_path)):
             config = self.configuration.nfs_shares_config
             if not config:
                 msg = (_("There's no NFS config file configured (%s)") %
diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py
index 6f26bc47e5f..b83c296772f 100644
--- a/cinder/volume/drivers/remotefs.py
+++ b/cinder/volume/drivers/remotefs.py
@@ -1257,7 +1257,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
 
         LOG.info('Deleting stale snapshot: %s', snapshot.id)
         self._delete(snapshot_path)
-        del(snap_info[snapshot.id])
+        del (snap_info[snapshot.id])
         self._write_info_file(info_path, snap_info)
 
     def _delete_snapshot(self, snapshot: objects.Snapshot) -> None:
@@ -1434,7 +1434,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
             self._rebase_img(higher_file_path, base_file, base_file_fmt)
 
         # Remove snapshot_file from info
-        del(snap_info[snapshot.id])
+        del (snap_info[snapshot.id])
         self._write_info_file(info_path, snap_info)
 
     def _create_volume_from_snapshot(self,
@@ -1835,7 +1835,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
                            'type': 'qcow2',
                            'volume_id': snapshot.volume.id}
 
-            del(snap_info[snapshot.id])
+            del (snap_info[snapshot.id])
             update_format = True
         else:
             # blockCommit snapshot into base
@@ -1848,7 +1848,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
                            'type': 'qcow2',
                            'volume_id': snapshot.volume.id}
 
-            del(snap_info[snapshot.id])
+            del (snap_info[snapshot.id])
 
         self._nova_assisted_vol_snap_delete(context, snapshot, delete_info)
 
diff --git a/cinder/volume/drivers/windows/smbfs.py b/cinder/volume/drivers/windows/smbfs.py
index 21f9a5238b1..cd0ee0bafab 100644
--- a/cinder/volume/drivers/windows/smbfs.py
+++ b/cinder/volume/drivers/windows/smbfs.py
@@ -486,7 +486,7 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
             self._delete(merged_img_path)
 
         # TODO(lpetrut): drop snapshot info file usage.
-        del(snap_info[snapshot.id])
+        del (snap_info[snapshot.id])
         self._write_info_file(info_path, snap_info)
 
         if not isinstance(snapshot, objects.Snapshot):
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index ed58a61a08b..e03767b946b 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -3112,7 +3112,7 @@ class VolumeManager(manager.CleanableManager,
 
         # Don't allow volume with replicas to be migrated
         rep_status = volume.replication_status
-        if(rep_status is not None and rep_status not in
+        if (rep_status is not None and rep_status not in
            [fields.ReplicationStatus.DISABLED,
             fields.ReplicationStatus.NOT_CAPABLE]):
             _retype_error(context, volume, old_reservations,
diff --git a/cinder/volume/targets/fake.py b/cinder/volume/targets/fake.py
index 97841bbac43..727d1d512b8 100644
--- a/cinder/volume/targets/fake.py
+++ b/cinder/volume/targets/fake.py
@@ -17,7 +17,7 @@ class FakeTarget(iscsi.ISCSITarget):
     VERSION = '0.1'
 
     def _get_target_and_lun(self, context, volume):
-        return(0, 0)
+        return (0, 0)
 
     def create_iscsi_target(self, name, tid, lun, path,
                             chap_auth, **kwargs):
diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py
index 749fdda4c07..f10b5a6cd81 100644
--- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py
+++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py
@@ -163,7 +163,7 @@ class BrcdFCZoneClientCLI(object):
                 'zone_members_with_sep': zone_members_with_sep}
             LOG.debug("Creating zone, cmd to run %s", cmd)
             self.apply_zone_change(cmd.split())
-            if(iterator_count > 0):
+            if (iterator_count > 0):
                 zone_with_sep += ';'
             iterator_count += 1
             zone_with_sep += zone
@@ -244,7 +244,7 @@ class BrcdFCZoneClientCLI(object):
                 'zone_members_with_sep': zone_members_with_sep}
             LOG.debug("Updating zone, cmd to run %s", cmd)
             self.apply_zone_change(cmd.split())
-            if(iterator_count > 0):
+            if (iterator_count > 0):
                 zone_with_sep += ';'
             iterator_count += 1
             zone_with_sep += zone
@@ -350,7 +350,7 @@ class BrcdFCZoneClientCLI(object):
 
     def _cfg_trans_abort(self):
         is_abortable = self._is_trans_abortable()
-        if(is_abortable):
+        if (is_abortable):
             self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT])
 
     def _is_trans_abortable(self):
@@ -361,7 +361,7 @@ class BrcdFCZoneClientCLI(object):
         output = stdout.splitlines()
         is_abortable = False
         for line in output:
-            if(zone_constant.TRANS_ABORTABLE in line):
+            if (zone_constant.TRANS_ABORTABLE in line):
                 is_abortable = True
                 break
         if stderr:
@@ -437,7 +437,7 @@ class BrcdFCZoneClientCLI(object):
         """
         return_list = []
         for line in switch_data:
-            if not(" NL " in line or " N " in line):
+            if not (" NL " in line or " N " in line):
                 continue
             linesplit = line.split(';')
             if len(linesplit) > 2:
diff --git a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py
index b8584f7590e..6bc1a8729a5 100644
--- a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py
+++ b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py
@@ -896,7 +896,7 @@ class BrcdHTTPFCZoneClient(object):
         timeout = 360
         sleep_time = 3
         time_elapsed = 0
-        while(status != "done"):
+        while (status != "done"):
             txn_response = self.connect(
                 zone_constant.GET_METHOD, transURL, "", headers)
             parsed_data_txn = self.get_parsed_data(txn_response,
diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py
index f5a9fedd05d..e98d4093502 100644
--- a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py
+++ b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py
@@ -219,7 +219,7 @@ class CiscoFCSanLookupService(fc_service.FCSanLookupService):
         """
         nsinfo_list = []
         for line in switch_data:
-            if not(" N " in line):
+            if not (" N " in line):
                 continue
             linesplit = line.split()
             if len(linesplit) > 2:
diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py
index ec91ada1469..af90bb7d608 100644
--- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py
+++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py
@@ -407,7 +407,7 @@ class CiscoFCZoneClientCLI(object):
         """
         return_list = []
         for line in switch_data:
-            if not(" N " in line):
+            if not (" N " in line):
                 continue
             linesplit = line.split()
             if len(linesplit) > 2:
diff --git a/tox.ini b/tox.ini
index a932d16c08b..31c3c6d89e7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -233,9 +233,6 @@ commands =
 #
 # E251 unexpected spaces around keyword / parameter equals
 # reason: no improvement in readability
-# E275: missing whitespace after keyword
-# reason: many failures newly triggered in pycodestyle 2.9.0,
-#         evaluate if fixing is worthwhile
 # E402 module level import not at top of file
 # reason: there are numerous places where we import modules
 #         later for legitimate reasons
@@ -251,7 +248,7 @@ commands =
 # reason: no real benefit
 # G200 Logging statements should not include the exception
 # reason: Many existing cases of this that may be legitimate
-ignore = E251,E275,E402,W503,W504,H101,G200
+ignore = E251,E402,W503,W504,H101,G200
 # H904 Delay string interpolations at logging calls.
 enable-extensions = H106,H203,H904
 exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build
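
Taken together, the hunks above are one mechanical cleanup: tox.ini stops ignoring pycodestyle's E275 ("missing whitespace after keyword", which the removed comment notes started firing widely with pycodestyle 2.9.0), and every keyword spelled like a call (if(, elif(, while(, not(, del(, return(, yield(, except(, assert() gains a space after the keyword. A minimal sketch of the rule on hypothetical names, not Cinder code:

    # Illustration only; check with: pycodestyle --select=E275 example.py
    snap_info = {'snap-1': 'volume-1.snap-1'}

    # Flagged once E275 is enforced:  if(snap_info): del(snap_info['snap-1'])
    # Minimal fix, as in the hunks above (space added, parentheses kept):
    if (snap_info):
        del (snap_info['snap-1'])

    # Equally valid, since the parentheses are redundant:
    if snap_info:
        del snap_info['snap-1']

Keeping the now-redundant parentheses keeps each hunk a whitespace-only change.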