Set pep8 version to 1.1 in test_requires

* Fixes bug 1007518
* Newer pep8 releases add checks that cause new failures
* Fix up the failures we found anyway (the recurring pattern is sketched below)

Change-Id: I5cd73a252f73893e4672a2e39b667c519423ae3f
John Griffith 2012-06-01 10:46:14 -06:00
parent 2263cf7db7
commit 0cd8f34eed
13 changed files with 35 additions and 35 deletions
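Most of the hunks below apply the same mechanical edit: drop the line-continuation backslashes that pep8 reports as redundant when the expression already sits inside open brackets, and rewrite the None/False comparisons that its comparison checks flag. The following is a rough, self-contained sketch of the backslash part of that edit; the widgets table, the sqlite URL and the uuid value are illustrative stand-ins rather than objects from the migrations below, and the Engine.execute() call assumes the legacy SQLAlchemy Engine API (removed in SQLAlchemy 2.0) that these migration scripts rely on.

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

# Throwaway in-memory table standing in for the real migration tables.
migrate_engine = create_engine('sqlite://')
meta = MetaData()
widgets = Table('widgets', meta,
                Column('id', Integer, primary_key=True),
                Column('uuid', String(36)))
meta.create_all(migrate_engine)

# Before (the '-' lines in the hunks below): explicit backslash continuations,
# even though the open call parentheses already continue the statement.
#   migrate_engine.execute(widgets.update()\
#                          .where(widgets.c.id == 1)\
#                          .values(uuid='...'))

# After (the '+' lines in the hunks below): implicit continuation only.
migrate_engine.execute(widgets.update()
                       .where(widgets.c.id == 1)
                       .values(uuid='00000000-0000-0000-0000-000000000000'))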

View File

@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
 _warn_on_bytestring=False),
 nullable=True)
 instances.create_column(instances_os_type)
-migrate_engine.execute(instances.update()\
-.where(instances.c.os_type == None)\
+migrate_engine.execute(instances.update()
+.where(instances.c.os_type is None)
 .values(os_type='linux'))
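The other recurring edit is the comparison rewrite visible on the '+' line above. A plain-Python illustration of the style rule itself follows; os_type here is a local variable, not the SQLAlchemy column object used in the migration (for a column, == is overloaded to build a SQL expression, so the two spellings are not interchangeable there).

os_type = None  # stand-in value for illustration only

# Flagged by pep8's comparison-to-None check (reported as E711 in current
# releases): comparison to None should use 'is' / 'is not'.
if os_type == None:
    os_type = 'linux'

# Preferred spelling, and what this commit switches the migrations to.
if os_type is None:
    os_type = 'linux'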

View File

@@ -40,8 +40,8 @@ def upgrade(migrate_engine):
 type_names[row[0]] = row[1]
 for type_id, type_name in type_names.iteritems():
-migrate_engine.execute(instances.update()\
-.where(instances.c.instance_type == type_name)\
+migrate_engine.execute(instances.update()
+.where(instances.c.instance_type == type_name)
 .values(instance_type_id=type_id))
 instances.c.instance_type.drop()
@@ -67,8 +67,8 @@ def downgrade(migrate_engine):
 type_names[row[0]] = row[1]
 for type_id, type_name in type_names.iteritems():
-migrate_engine.execute(instances.update()\
-.where(instances.c.instance_type_id == type_id)\
+migrate_engine.execute(instances.update()
+.where(instances.c.instance_type_id == type_id)
 .values(instance_type=type_name))
 instances.c.instance_type_id.drop()

View File

@@ -90,7 +90,7 @@ def _assert_no_duplicate_project_ids(quotas):
 def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
 """Ensure that there are no duplicate non-deleted quota entries."""
-select = quotas.select().where(quotas.c.deleted == False)
+select = quotas.select().where(quotas.c.deleted is False)
 results = migrate_engine.execute(select)
 _assert_no_duplicate_project_ids(list(results))
@@ -99,7 +99,7 @@ def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
 """Ensure that there are no duplicate non-deleted quota entries."""
 for resource in resources:
 select = quotas.select().\
-where(quotas.c.deleted == False).\
+where(quotas.c.deleted is False).\
 where(quotas.c.resource == resource)
 results = migrate_engine.execute(select)
 _assert_no_duplicate_project_ids(list(results))

View File

@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
 rows = migrate_engine.execute(instances.select())
 for row in rows:
 instance_uuid = str(utils.gen_uuid())
-migrate_engine.execute(instances.update()\
-.where(instances.c.id == row[0])\
+migrate_engine.execute(instances.update()
+.where(instances.c.id == row[0])
 .values(uuid=instance_uuid))

View File

@@ -103,7 +103,7 @@ def upgrade(migrate_engine):
 # populate the fixed_ips virtual_interface_id column
 s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
-fixed_ips.c.instance_id != None)
+fixed_ips.c.instance_id is not None)
 for row in s.execute():
 m = select([virtual_interfaces.c.id]).\

View File

@@ -36,11 +36,11 @@ def upgrade(migrate_engine):
 itypes[instance_type.id] = instance_type.flavorid
 for instance_type_id in itypes.keys():
-migrate_engine.execute(migrations.update()\
-.where(migrations.c.old_flavor_id == itypes[instance_type_id])\
+migrate_engine.execute(migrations.update()
+.where(migrations.c.old_flavor_id == itypes[instance_type_id])
 .values(old_instance_type_id=instance_type_id))
-migrate_engine.execute(migrations.update()\
-.where(migrations.c.new_flavor_id == itypes[instance_type_id])\
+migrate_engine.execute(migrations.update()
+.where(migrations.c.new_flavor_id == itypes[instance_type_id])
 .values(new_instance_type_id=instance_type_id))
 migrations.c.old_flavor_id.drop()
@@ -66,13 +66,13 @@ def downgrade(migrate_engine):
 itypes[instance_type.flavorid] = instance_type.id
 for instance_type_flavorid in itypes.keys():
-migrate_engine.execute(migrations.update()\
+migrate_engine.execute(migrations.update()
 .where(migrations.c.old_instance_type_id ==
-itypes[instance_type_flavorid])\
+itypes[instance_type_flavorid])
 .values(old_flavor_id=instance_type_flavorid))
-migrate_engine.execute(migrations.update()\
+migrate_engine.execute(migrations.update()
 .where(migrations.c.new_instance_type_id ==
-itypes[instance_type_flavorid])\
+itypes[instance_type_flavorid])
 .values(new_flavor_id=instance_type_flavorid))
 migrations.c.old_instance_type_id.drop()

View File

@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
 rows = migrate_engine.execute(virtual_interfaces.select())
 for row in rows:
 vif_uuid = str(utils.gen_uuid())
-migrate_engine.execute(virtual_interfaces.update()\
-.where(virtual_interfaces.c.id == row[0])\
+migrate_engine.execute(virtual_interfaces.update()
+.where(virtual_interfaces.c.id == row[0])
 .values(uuid=vif_uuid))

View File

@@ -31,8 +31,8 @@ def upgrade(migrate_engine):
 rows = migrate_engine.execute(networks.select())
 for row in rows:
 networks_uuid = str(utils.gen_uuid())
-migrate_engine.execute(networks.update()\
-.where(networks.c.id == row[0])\
+migrate_engine.execute(networks.update()
+.where(networks.c.id == row[0])
 .values(uuid=networks_uuid))

View File

@@ -51,11 +51,11 @@ def upgrade(migrate_engine):
 bw_usage_cache.create_column(mac_column)
 bw_usage_cache.update()\
-.values(mac=select([vifs.c.address])\
+.values(mac=select([vifs.c.address])
 .where(and_(
 networks.c.label == bw_usage_cache.c.network_label,
 networks.c.id == vifs.c.network_id,
-bw_usage_cache.c.instance_id == vifs.c.instance_id))\
+bw_usage_cache.c.instance_id == vifs.c.instance_id))
 .as_scalar()).execute()
 bw_usage_cache.c.network_label.drop()
@@ -87,11 +87,11 @@ def downgrade(migrate_engine):
 bw_usage_cache.create_column(network_label_column)
 bw_usage_cache.update()\
-.values(network_label=select([network.c.label])\
+.values(network_label=select([network.c.label])
 .where(and_(
 network.c.id == vifs.c.network_id,
 vifs.c.address == bw_usage_cache.c.mac,
-bw_usage_cache.c.instance_id == vifs.c.instance_id))\
+bw_usage_cache.c.instance_id == vifs.c.instance_id))
 .as_scalar()).execute()
 bw_usage_cache.c.mac.drop()

View File

@@ -64,6 +64,6 @@ def downgrade(migrate_engine):
 for row in migrate_engine.execute(bw_usage_cache.select()):
 instance_id = cache[row['mac']]
-migrate_engine.execute(bw_usage_cache.update()\
-.where(bw_usage_cache.c.id == row['id'])\
+migrate_engine.execute(bw_usage_cache.update()
+.where(bw_usage_cache.c.id == row['id'])
 .values(instance_id=instance_id))

View File

@@ -81,7 +81,7 @@ def upgrade(migrate_engine):
 if migrate_engine.name == "mysql":
 migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
-migrate_engine.execute("ALTER TABLE snapshot_id_mappings "\
+migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
 "Engine=InnoDB")
 volumes = Table('volumes', meta, autoload=True)

View File

@@ -60,7 +60,7 @@ def import_normalize(line):
 split_line[2] == "import" and split_line[3] != "*" and
 split_line[1] != "__future__" and
 (len(split_line) == 4 or
-(len(split_line) == 6 and split_line[4] == "as"))):
+(len(split_line) == 6 and split_line[4] == "as"))):
 return "import %s.%s" % (split_line[1], split_line[3])
 else:
 return line
@@ -212,8 +212,8 @@ def cinder_import_alphabetical(physical_line, line_number, lines):
 ).strip().lower().split()
 # with or without "as y"
 length = [2, 4]
-if (len(split_line) in length and len(split_previous) in length and
-split_line[0] == "import" and split_previous[0] == "import"):
+if (len(split_line) in length and len(split_previous) in length and
+split_line[0] == "import" and split_previous[0] == "import"):
 if split_line[1] < split_previous[1]:
 return (0,
 "CINDER N306: imports not in alphabetical order (%s, %s)"
@@ -244,7 +244,7 @@ def cinder_docstring_one_line(physical_line):
 """
 pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
 end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
-if (pos != -1 and end and len(physical_line) > pos + 4):
+if (pos != -1 and end and len(physical_line) > pos + 4):
 if (physical_line[-5] != '.'):
 return pos, "CINDER N402: one line docstring needs a period"
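For context on the check touched in the last hunk above: cinder_docstring_one_line() flags a one-line docstring whose closing triple quote is not preceded by a period. A small illustration follows; the function names are made up for the example, not taken from the tree.

# Flagged by the check above: one-line docstring with no trailing period
# ("CINDER N402: one line docstring needs a period").
def lookup_volume(volume_id):
    """Return the volume record"""
    return volume_id

# Passes: the docstring ends with a period.
def lookup_volume_ok(volume_id):
    """Return the volume record."""
    return volume_id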

View File

@@ -6,6 +6,6 @@ mox==0.5.3
 nose
 nosexcover
 openstack.nose_plugin
-pep8>=1.0
+pep8==1.1
 sphinx>=1.1.2
 MySQL-python
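The final hunk pins pep8 to exactly 1.1 so that later releases cannot pull new checks into the gate. A quick way to confirm which pep8 a test environment actually installed, assuming the 1.x package layout where the module exposes __version__:

import pep8

# In an environment built from this requirements list, this is expected
# to print '1.1'.
print(pep8.__version__)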