Merge "Python3 common patterns"
commit badc90cf0f
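The hunks below replace filter()/map() calls with list comprehensions. Background for the review (illustration only; the names and values here are made up, not taken from the drivers): on Python 2, filter() and map() return lists, while on Python 3 they return lazy iterators, so len(), indexing, and repeated iteration on the result stop working. A list comprehension behaves the same on both versions.

    uids = ['iqn.a', 'iqn.b', 'iqn.c']
    registered = {'iqn.b'}

    # Python 3: filter() returns a lazy iterator, not a list.
    lazy = filter(lambda u: u not in registered, uids)
    # len(lazy) or lazy[0] would raise TypeError here.

    # The comprehension form used throughout this commit works the same way
    # on Python 2 and 3 and supports len(), indexing, and re-iteration.
    un_registered = [u for u in uids if u not in registered]
    assert un_registered == ['iqn.a', 'iqn.c']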
@@ -188,7 +188,7 @@ class UnityClient(object):
             host = self.system.create_host(name=name)
 
         host_initiators_ids = self.get_host_initiator_ids(host)
-        un_registered = filter(lambda h: h not in host_initiators_ids, uids)
+        un_registered = [h for h in uids if h not in host_initiators_ids]
         for uid in un_registered:
             host.add_initiator(uid, force_create=True)
 
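Side note on the hunk above (sketch with made-up values): a Python 3 filter object is also single-use, so anything that walks the result a second time, or inspects it after the loop, would silently see nothing. The list keeps the data around:

    uids = ['iqn.a', 'iqn.b', 'iqn.c']
    host_initiators_ids = ['iqn.b']

    lazy = filter(lambda h: h not in host_initiators_ids, uids)
    assert list(lazy) == ['iqn.a', 'iqn.c']
    assert list(lazy) == []          # exhausted after the first pass (Python 3)

    un_registered = [h for h in uids if h not in host_initiators_ids]
    assert list(un_registered) == ['iqn.a', 'iqn.c']
    assert list(un_registered) == ['iqn.a', 'iqn.c']  # a real list can be reused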
@@ -102,8 +102,8 @@ class CommonAdapter(object):
         if pool_names is not None:
             # Filter out the empty string in the list.
             pool_names = [name.strip()
-                          for name in filter(lambda x: len(x.strip()) != 0,
-                                             pool_names)]
+                          for name in [x for x in pool_names
+                                       if len(x.strip()) != 0]]
             if len(pool_names) == 0:
                 raise exception.InvalidConfigurationValue(
                     option='[{group}] storage_vnx_pool_names'.format(
@@ -116,8 +116,8 @@ class CommonAdapter(object):
         io_port_list = self.config.io_port_list
         if io_port_list is not None:
             io_port_list = [port.strip().upper()
-                            for port in filter(lambda x: len(x.strip()) != 0,
-                                               io_port_list)]
+                            for port in [x for x in io_port_list
+                                         if len(x.strip()) != 0]]
             if len(io_port_list) == 0:
                 # io_port_list is allowed to be an empty list, which means
                 # none of the ports will be registered.
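The two hunks above follow the same shape: the inner filter over non-blank entries becomes a nested comprehension. Roughly (sample values are made up, not from the driver config):

    raw = ' pool_a , , pool_b '.split(',')   # [' pool_a ', ' ', ' pool_b ']

    # Form used by the commit: an inner comprehension drops blank entries,
    # the outer one strips the remaining names.
    pool_names = [name.strip()
                  for name in [x for x in raw if len(x.strip()) != 0]]
    assert pool_names == ['pool_a', 'pool_b']

    # A single comprehension expresses the same thing, at the cost of
    # calling strip() twice per entry.
    assert [x.strip() for x in raw if x.strip()] == pool_names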
@@ -276,7 +276,7 @@ class HuaweiConf(object):
         # Step 4, remove invalid configure pairs, convert to:
         # [['Name:xxx', 'TargetPortGroup:xxx'],
         # ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']]
-        initiator_infos = map(lambda x: filter(lambda y: y, x),
+        initiator_infos = map(lambda x: [y for y in x if y],
                               initiator_infos)
 
         # Step 5, make initiators configure dict, convert to:
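Note on the hunk above: only the inner filter becomes a comprehension; the surrounding map() still yields a lazy iterator on Python 3, so the result has to be iterated (or materialized) before it can be indexed or reused downstream. A rough sketch with placeholder data:

    initiator_infos = [['Name:iqn.x', '', 'TargetPortGroup:tpg0'],
                       ['Name:iqn.y', None, 'CHAPinfo:mm-usr#mm-pwd']]

    cleaned = map(lambda x: [y for y in x if y], initiator_infos)
    # On Python 3 `cleaned` is a map object; wrap it if list behaviour is needed.
    cleaned = list(cleaned)
    assert cleaned == [['Name:iqn.x', 'TargetPortGroup:tpg0'],
                       ['Name:iqn.y', 'CHAPinfo:mm-usr#mm-pwd']]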
@@ -547,8 +547,8 @@ class DataMotionMixin(object):
         The 'slowest' mirror determines the best update that occurred on a
         given replication target.
         """
-        filtered_mirrors = list(filter(lambda x: x.get('destination-volume')
-                                       in flexvols, mirrors))
+        filtered_mirrors = [x for x in mirrors
+                            if x.get('destination-volume') in flexvols]
         sorted_mirrors = sorted(filtered_mirrors,
                                 key=lambda x: int(x.get('lag-time')),
                                 reverse=True)
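The old code here already wrapped filter() in list(), which is safe on both Python versions; the comprehension is simply the equivalent, lambda-free spelling. Illustration with made-up mirror records:

    flexvols = {'vol1', 'vol2'}
    mirrors = [{'destination-volume': 'vol1', 'lag-time': '30'},
               {'destination-volume': 'vol9', 'lag-time': '5'},
               {'destination-volume': 'vol2', 'lag-time': '120'}]

    filtered_mirrors = [x for x in mirrors
                        if x.get('destination-volume') in flexvols]
    sorted_mirrors = sorted(filtered_mirrors,
                            key=lambda x: int(x.get('lag-time')),
                            reverse=True)
    # The mirror with the largest lag-time (the "slowest") comes first.
    assert [m['lag-time'] for m in sorted_mirrors] == ['120', '30']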
@@ -459,9 +459,8 @@ class NetAppESeriesLibrary(object):
     def _get_ordered_images_in_snapshot_group(self, snapshot_group_id):
         images = self._client.list_snapshot_images()
         if images:
-            filtered_images = filter(lambda img: (img['pitGroupRef'] ==
-                                                  snapshot_group_id),
-                                     images)
+            filtered_images = [img for img in images if img['pitGroupRef'] ==
+                               snapshot_group_id]
             sorted_imgs = sorted(filtered_images, key=lambda x: x[
                 'pitTimestamp'])
             return sorted_imgs
@@ -826,12 +825,11 @@ class NetAppESeriesLibrary(object):
         groups_for_v = self._get_snapshot_groups_for_volume(vol)
 
         # Filter out reserved snapshot groups
-        groups = filter(lambda g: self.SNAPSHOT_VOL_COPY_SUFFIX not in g[
-            'label'], groups_for_v)
+        groups = [g for g in groups_for_v
+                  if self.SNAPSHOT_VOL_COPY_SUFFIX not in g['label']]
 
         # Filter out groups that are part of a consistency group
-        groups = filter(lambda g: not g['consistencyGroup'], groups)
-
+        groups = [g for g in groups if not g['consistencyGroup']]
         # Find all groups with free snapshot capacity
         groups = [group for group in groups if group.get('snapshotCount') <
                   self.MAX_SNAPSHOT_COUNT]
@@ -1630,8 +1628,8 @@ class NetAppESeriesLibrary(object):
 
         pool_ids = set(pool.get("volumeGroupRef") for pool in storage_pools)
 
-        relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in
-                                pool_ids, all_disks)
+        relevant_disks = [x for x in all_disks
+                          if x.get('currentVolumeGroupRef') in pool_ids]
         for drive in relevant_disks:
             current_vol_group = drive.get('currentVolumeGroupRef')
             if current_vol_group not in ssc_stats:
@@ -1961,13 +1961,14 @@ class SolidFireDriver(san.SanISCSIDriver):
                 endpoint=remote['endpoint'])
         primary_vols = self._map_sf_volumes(volumes)
         for v in volumes:
-            remote_vlist = filter(lambda sfv: sfv['cinder_id'] == v['id'],
-                                  remote_vols)
+            remote_vlist = [sfv for sfv in remote_vols
+                            if sfv['cinder_id'] == v['id']]
+
             if len(remote_vlist) > 0:
                 remote_vol = remote_vlist[0]
                 self._failover_volume(remote_vol, remote)
-                primary_vol = filter(lambda sfv: sfv['cinder_id'] == v['id'],
-                                     primary_vols)[0]
+                primary_vol = [sfv for sfv in primary_vols if
+                               sfv['cinder_id'] == v['id']][0]
                 if len(primary_vol['volumePairs']) > 0:
                     self._issue_api_request(
                         'RemoveVolumePair',
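This hunk is the clearest Python 3 fix in the series: the old code subscripted the result of filter() directly, which raises TypeError on Python 3 because a filter object is not subscriptable. A small reproduction with placeholder volume dicts:

    primary_vols = [{'cinder_id': 'a', 'volumePairs': []},
                    {'cinder_id': 'b', 'volumePairs': [1]}]
    v = {'id': 'b'}

    lazy = filter(lambda sfv: sfv['cinder_id'] == v['id'], primary_vols)
    try:
        lazy[0]                  # works on Python 2, TypeError on Python 3
    except TypeError:
        pass

    # The comprehension materializes a list, so [0] is valid on both versions.
    primary_vol = [sfv for sfv in primary_vols if sfv['cinder_id'] == v['id']][0]
    assert primary_vol['cinder_id'] == 'b'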
@@ -264,7 +264,7 @@ class SCSTAdm(iscsi.ISCSITarget):
         out = out.split("\n")[2]
         if "IncomingUser" in out:
             out = out.split(" ")
-            out = filter(lambda a: a != "", out)
+            out = [a for a in out if a != ""]
             return (out[1], out[2])
         else:
             return None
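Same issue as the SolidFire hunk: out[1] and out[2] index the filtered tokens, which only works when the result is a real list. For what it's worth, split() without an argument would already collapse the runs of spaces; a sketch with a fabricated scstadmin output line:

    line = "IncomingUser   someuser   somepassword"

    tokens = line.split(" ")                 # keeps empty strings between spaces
    tokens = [a for a in tokens if a != ""]  # commit's form: a subscriptable list
    assert (tokens[1], tokens[2]) == ("someuser", "somepassword")

    # split() with no argument drops the empty strings in one step.
    assert line.split()[1:3] == ["someuser", "somepassword"]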