Remove E12 errors from tox.ini Flake ignores.
This removes E12 from the flake8 ignore list and fixes up the existing pep8 errors that we were ignoring.

Change-Id: I5d60f1eed768fcae01a708fcf9ea324844c6376d
parent dac54e709a
commit b7ceb409ec
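The tox.ini hunk itself is not visible in this view. pep8's E12x family (E121 through E128) covers continuation-line indentation, so dropping the "E12" prefix from the ignore list enables all of those checks at once. Based on the commit message, the tox.ini side of the change would look roughly like this hypothetical sketch; "<other codes>" is a placeholder, not the real list from this revision:

[flake8]
-ignore = E12,<other codes>
+ignore = <other codes>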
@@ -34,8 +34,9 @@ import os
 import sys
 
 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
-    sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
 if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
     sys.path.insert(0, possible_topdir)
 
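The two bin-script hunks here, and most hunks below, rewrite continuations into one of the two styles the E12x checks accept. A minimal, self-contained sketch of both styles; join_host is an illustrative stand-in, not cinder code:

def join_host(host, backend):
    """Stand-in helper used only to illustrate continuation styles."""
    return "%s@%s" % (host, backend)

# Visual indent: continuation lines line up with the first character
# after the opening parenthesis (E127/E128 check this alignment).
label = join_host("localhost",
                  "lvm")

# Hanging indent: nothing follows the opening parenthesis, and every
# continuation line gets a uniform extra indent (E121/E122/E126).
label = join_host(
    "localhost",
    "lvm")

print(label)  # localhost@lvm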
@@ -30,8 +30,9 @@ import os
 import sys
 
 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
-    sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
 if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
     sys.path.insert(0, possible_topdir)
 
@@ -691,8 +691,7 @@ class ServiceCommands(object):
         ctxt = context.get_admin_context()
         services = db.service_get_all(ctxt)
         print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
-        print print_format % (
-            _('Binary'),
+        print print_format % (_('Binary'),
                               _('Host'),
                               _('Zone'),
                               _('Status'),
@@ -51,8 +51,7 @@ if __name__ == '__main__':
     if FLAGS.enabled_backends:
         for backend in FLAGS.enabled_backends:
             host = "%s@%s" % (FLAGS.host, backend)
-            server = service.Service.create(
-                host=host,
+            server = service.Service.create(host=host,
                                             service_name=backend)
             launcher.launch_server(server)
     else:
@@ -80,8 +80,8 @@ if __name__ == '__main__':
     print _("Found %d volumes") % len(volumes)
     for volume_ref in volumes:
         try:
-            cinder.volume.utils.notify_usage_exists(
-                admin_context, volume_ref)
+            cinder.volume.utils.notify_usage_exists(admin_context,
+                                                    volume_ref)
         except Exception, e:
             print traceback.format_exc(e)
 
@@ -161,7 +161,8 @@ class VolumeTransferController(wsgi.Controller):
 
         name = transfer.get('name', None)
 
-        LOG.audit(_("Creating transfer of volume %(volume_id)s"), locals(),
+        LOG.audit(_("Creating transfer of volume %s"),
+                  volume_id,
                   context=context)
 
         try:
@@ -194,7 +195,7 @@ class VolumeTransferController(wsgi.Controller):
             msg = _("Incorrect request body format")
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.audit(_("Accepting transfer %(transfer_id)s"), locals(),
+        LOG.audit(_("Accepting transfer %s"), transfer_id,
                   context=context)
 
         try:
@@ -206,7 +207,8 @@ class VolumeTransferController(wsgi.Controller):
         except exception.InvalidVolume as error:
             raise exc.HTTPBadRequest(explanation=unicode(error))
 
-        transfer = self._view_builder.summary(req,
+        transfer = \
+            self._view_builder.summary(req,
                                        dict(accepted_transfer.iteritems()))
         return transfer
 
@@ -94,8 +94,10 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
 
     def _extract_scheduler_hints(self, volume_node):
         """Marshal the scheduler hints attribute of a parsed request."""
-        node = self.find_first_child_named_in_namespace(volume_node,
-            SCHEDULER_HINTS_NAMESPACE, "scheduler_hints")
+        node =\
+            self.find_first_child_named_in_namespace(volume_node,
+                                                     SCHEDULER_HINTS_NAMESPACE,
+                                                     "scheduler_hints")
         if node:
             scheduler_hints = {}
             for child in self.extract_elements(node):
@@ -55,8 +55,7 @@ class SchedulerHintsTestCase(test.TestCase):
         req.content_type = 'application/json'
         body = {'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
-                'volume_id': '1',
-                }
+                'volume_id': '1', }
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
         self.assertEqual(202, res.status_int)
@@ -78,8 +77,7 @@ class SchedulerHintsTestCase(test.TestCase):
         body = {'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'volume_id': '1',
-                'scheduler_hints': {'a': 'b'},
-                }
+                'scheduler_hints': {'a': 'b'}, }
 
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
@@ -93,8 +91,7 @@ class SchedulerHintsTestCase(test.TestCase):
                 'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'volume_id': '1',
-                'scheduler_hints': 'a', }
-                }
+                'scheduler_hints': 'a', }}
 
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
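The three test hunks above fold a dangling closing brace onto the last dictionary entry. That is one way to satisfy E123/E124, which check that a closing bracket matches either the opening line's indentation or the visual indent. A small sketch; the names are illustrative, not from the cinder tests:

# Compliant: the closing brace rides on the final entry.
body = {'id': '42',
        'volume_id': '1', }

# Also compliant: a hanging indent with the brace back at the
# indentation of the line that opened the literal.
body = {
    'id': '42',
    'volume_id': '1',
}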
@@ -51,8 +51,7 @@ def delete_volume_type_extra_specs_not_found(context, volume_type_id, key):
 
 
 def stub_volume_type_extra_specs():
-    specs = {
-        "key1": "value1",
+    specs = {"key1": "value1",
              "key2": "value2",
              "key3": "value3",
              "key4": "value4",
@@ -25,8 +25,7 @@ from cinder.volume import volume_types
 
 
 def stub_volume_type(id):
-    specs = {
-        "key1": "value1",
+    specs = {"key1": "value1",
             "key2": "value2",
             "key3": "value3",
             "key4": "value4",
@@ -26,8 +26,7 @@ from cinder.volume import volume_types
 
 
 def stub_volume_type(id):
-    specs = {
-        "key1": "value1",
+    specs = {"key1": "value1",
             "key2": "value2",
             "key3": "value3",
             "key4": "value4",
@@ -62,7 +62,8 @@ class HostFiltersTestCase(test.TestCase):
         stub_out_https_backend(self.stubs)
         self.context = context.RequestContext('fake', 'fake')
         self.json_query = jsonutils.dumps(
-            ['and', ['>=', '$free_capacity_gb', 1024],
+            ['and',
+             ['>=', '$free_capacity_gb', 1024],
              ['>=', '$total_capacity_gb', 10 * 1024]])
         # This has a side effect of testing 'get_filter_classes'
         # when specifying a method (in this case, our standard filters)
@@ -45,7 +45,8 @@ def _quota_reserve(context, project_id):
     for i in range(3):
         resource = 'res%d' % i
         quotas[resource] = db.quota_create(context, project_id, resource, i)
-        resources[resource] = ReservableResource(resource,
+        resources[resource] = ReservableResource(
+            resource,
             get_sync(resource, i), 'quota_res_%d' % i)
         deltas[resource] = i
     return db.quota_reserve(context, resources, quotas, deltas,
@@ -278,7 +279,8 @@ class DBAPIVolumeTestCase(BaseTest):
             for j in xrange(3):
                 db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
         for i in xrange(3):
-            self.assertEqual((3, 300), db.volume_data_get_for_host(
+            self.assertEqual((3, 300),
+                             db.volume_data_get_for_host(
                                  self.ctxt, 'h%d' % i))
 
     def test_volume_data_get_for_project(self):
@@ -289,12 +291,14 @@ class DBAPIVolumeTestCase(BaseTest):
                                            'host': 'h-%d-%d' % (i, j),
                                            })
         for i in xrange(3):
-            self.assertEqual((3, 300), db.volume_data_get_for_project(
+            self.assertEqual((3, 300),
+                             db.volume_data_get_for_project(
                                  self.ctxt, 'p%d' % i))
 
     def test_volume_detached(self):
         volume = db.volume_create(self.ctxt, {})
-        db.volume_attached(self.ctxt, volume['id'],
+        db.volume_attached(self.ctxt,
+                           volume['id'],
                            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
         db.volume_detached(self.ctxt, volume['id'])
         volume = db.volume_get(self.ctxt, volume['id'])
@@ -315,7 +319,8 @@ class DBAPIVolumeTestCase(BaseTest):
                                               self.ctxt, volume['id'])
 
     def test_volume_get_all(self):
-        volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i})
+        volumes = [db.volume_create(self.ctxt,
+                                    {'host': 'h%d' % i, 'size': i})
                    for i in xrange(3)]
         self._assertEqualListsOfObjects(volumes, db.volume_get_all(
             self.ctxt, None, None, 'host', None))
@@ -337,7 +342,8 @@ class DBAPIVolumeTestCase(BaseTest):
             instance_uuid = str(uuidutils.uuid.uuid1())
             instance_uuids.append(instance_uuid)
             volumes.append([db.volume_create(self.ctxt,
-                            {'instance_uuid': instance_uuid}) for j in xrange(3)])
+                                             {'instance_uuid': instance_uuid})
+                            for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_instance_uuid(
@@ -402,8 +408,10 @@ class DBAPIReservationTestCase(BaseTest):
         self._assertEqualObjects(reservation, reservation_db)
 
     def test_reservation_get_nonexistent(self):
-        self.assertRaises(exception.ReservationNotFound, db.reservation_get,
-                          self.ctxt, 'non-exitent-resevation-uuid')
+        self.assertRaises(exception.ReservationNotFound,
+                          db.reservation_get,
+                          self.ctxt,
+                          'non-exitent-resevation-uuid')
 
     def test_reservation_commit(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
@@ -411,18 +419,23 @@ class DBAPIReservationTestCase(BaseTest):
                     'res0': {'reserved': 0, 'in_use': 0},
                     'res1': {'reserved': 1, 'in_use': 1},
                     'res2': {'reserved': 2, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
                              self.ctxt, 'project1'))
         db.reservation_get(self.ctxt, reservations[0])
         db.reservation_commit(self.ctxt, reservations, 'project1')
         self.assertRaises(exception.ReservationNotFound,
-                          db.reservation_get, self.ctxt, reservations[0])
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
         expected = {'project_id': 'project1',
                     'res0': {'reserved': 0, 'in_use': 0},
                     'res1': {'reserved': 0, 'in_use': 2},
                     'res2': {'reserved': 0, 'in_use': 4}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
     def test_reservation_rollback(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
@@ -430,18 +443,24 @@ class DBAPIReservationTestCase(BaseTest):
                     'res0': {'reserved': 0, 'in_use': 0},
                     'res1': {'reserved': 1, 'in_use': 1},
                     'res2': {'reserved': 2, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
         db.reservation_get(self.ctxt, reservations[0])
         db.reservation_rollback(self.ctxt, reservations, 'project1')
         self.assertRaises(exception.ReservationNotFound,
-                          db.reservation_get, self.ctxt, reservations[0])
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
         expected = {'project_id': 'project1',
                     'res0': {'reserved': 0, 'in_use': 0},
                     'res1': {'reserved': 0, 'in_use': 1},
                     'res2': {'reserved': 0, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
     def test_reservation_expire(self):
         self.values['expire'] = datetime.utcnow() + timedelta(days=1)
@@ -452,8 +471,10 @@ class DBAPIReservationTestCase(BaseTest):
                     'res0': {'reserved': 0, 'in_use': 0},
                     'res1': {'reserved': 0, 'in_use': 1},
                     'res2': {'reserved': 0, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
 
 class DBAPIQuotaTestCase(BaseTest):
@@ -492,11 +513,18 @@ class DBAPIQuotaTestCase(BaseTest):
 
     def test_quota_update_nonexistent(self):
         self.assertRaises(exception.ProjectQuotaNotFound,
-                          db.quota_update, self.ctxt, 'project1', 'resource1', 42)
+                          db.quota_update,
+                          self.ctxt,
+                          'project1',
+                          'resource1',
+                          42)
 
     def test_quota_get_nonexistent(self):
         self.assertRaises(exception.ProjectQuotaNotFound,
-                          db.quota_get, self.ctxt, 'project1', 'resource1')
+                          db.quota_get,
+                          self.ctxt,
+                          'project1',
+                          'resource1')
 
     def test_quota_reserve(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
@@ -512,16 +540,21 @@ class DBAPIQuotaTestCase(BaseTest):
         db.quota_destroy_all_by_project(self.ctxt, 'project1')
         self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
                          {'project_id': 'project1'})
-        self.assertEqual(db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'),
+        self.assertEqual(db.quota_usage_get_all_by_project(self.ctxt,
+                                                           'project1'),
                          {'project_id': 'project1'})
         for r in reservations:
             self.assertRaises(exception.ReservationNotFound,
-                              db.reservation_get, self.ctxt, r)
+                              db.reservation_get,
+                              self.ctxt,
+                              r)
 
     def test_quota_usage_get_nonexistent(self):
-        self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
-                          self.ctxt, 'p1', 'nonexitent_resource')
+        self.assertRaises(exception.QuotaUsageNotFound,
+                          db.quota_usage_get,
+                          self.ctxt,
+                          'p1',
+                          'nonexitent_resource')
 
     def test_quota_usage_get(self):
         reservations = _quota_reserve(self.ctxt, 'p1')
@@ -71,8 +71,8 @@ class GlusterFsDriverTestCase(test.TestCase):
         self._configuration.glusterfs_sparsed_volumes = True
 
         self.stubs = stubout.StubOutForTesting()
-        self._driver = glusterfs.GlusterfsDriver(
-            configuration=self._configuration)
+        self._driver =\
+            glusterfs.GlusterfsDriver(configuration=self._configuration)
         self._driver.shares = {}
 
     def tearDown(self):
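This hunk, and many of the EMC driver hunks below, pull the right-hand side onto its own line with a trailing backslash when the call no longer fits after the assignment. PEP 8 generally prefers implicit continuation inside brackets, but both forms pass the E12x checks. A minimal sketch of the pattern; make_driver and its argument are illustrative stand-ins:

def make_driver(configuration=None):
    """Stand-in factory used only to show the continuation pattern."""
    return {"configuration": configuration}

# Backslash continuation, as used by the hunks in this commit:
driver = \
    make_driver(configuration={"sparsed_volumes": True})

# The bracket-based alternative PEP 8 generally prefers:
driver = make_driver(
    configuration={"sparsed_volumes": True})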
@@ -430,21 +430,24 @@ class NfsDriverTestCase(test.TestCase):
         drv = self._driver
         self.configuration.nfs_oversub_ratio = -1
         self.assertRaises(exception.NfsException,
-                          drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
         """do_setup should throw error if nfs_used_ratio is less than 0."""
         drv = self._driver
         self.configuration.nfs_used_ratio = -1
         self.assertRaises(exception.NfsException,
-                          drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
         """do_setup should throw error if nfs_used_ratio is greater than 1."""
         drv = self._driver
         self.configuration.nfs_used_ratio = 2
         self.assertRaises(exception.NfsException,
-                          drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
         """do_setup should throw error if nfs client is not installed."""
@@ -350,8 +350,7 @@ class RBDTestCase(test.TestCase):
                 'auth_enabled': False,
                 'auth_username': None,
                 'secret_type': 'ceph',
-                'secret_uuid': None,
-                }
+                'secret_uuid': None, }
         }
         actual = self.driver.initialize_connection(dict(name=name), None)
         self.assertDictMatch(expected, actual)
@@ -18,8 +18,7 @@
 
 # Importing full names to not pollute the namespace and cause possible
 # collisions with use of 'from cinder.volume import <foo>' elsewhere.
-import cinder.flags
-import cinder.openstack.common.importutils
+import cinder.flags as flags
+import cinder.openstack.common.importutils as import_utils
 
-API = cinder.openstack.common.importutils.import_class(
-    cinder.flags.FLAGS.volume_api_class)
+API = import_utils.import_class(flags.FLAGS.volume_api_class)
@@ -422,8 +422,8 @@ class EMCSMISCommon():
 
         storage_system = vol_instance['SystemName']
 
-        configservice = self._find_storage_configuration_service(
-            storage_system)
+        configservice =\
+            self._find_storage_configuration_service(storage_system)
         if configservice is None:
             exception_message = (_("Error Delete Volume: %(volumename)s. "
                                    "Storage Configuration Service not found.")
@@ -443,9 +443,10 @@ class EMCSMISCommon():
                   'name': volumename,
                   'vol_instance': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-            'EMCReturnToStoragePool',
-            configservice, TheElements=[vol_instance.path])
+        rc, job =\
+            self.conn.InvokeMethod('EMCReturnToStoragePool',
+                                   configservice,
+                                   TheElements=[vol_instance.path])
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -506,8 +507,8 @@ class EMCSMISCommon():
                   'elementname': snapshotname,
                   'sourceelement': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-            'CreateElementReplica', repservice,
+        rc, job =\
+            self.conn.InvokeMethod('CreateElementReplica', repservice,
                                    ElementName=snapshotname,
                                    SyncType=self._getnum(7, '16'),
                                    SourceElement=vol_instance.path)
@@ -550,8 +551,8 @@ class EMCSMISCommon():
                   % {'snapshot': snapshotname,
                      'volume': volumename})
 
-        sync_name, storage_system = self._find_storage_sync_sv_sv(
-            snapshotname, volumename, False)
+        sync_name, storage_system =\
+            self._find_storage_sync_sv_sv(snapshotname, volumename, False)
         if sync_name is None:
             LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
                         'not found on the array. No snapshot to delete.')
@@ -578,8 +579,8 @@ class EMCSMISCommon():
                   'service': str(repservice),
                   'sync_name': str(sync_name)})
 
-        rc, job = self.conn.InvokeMethod(
-            'ModifyReplicaSynchronization',
+        rc, job =\
+            self.conn.InvokeMethod('ModifyReplicaSynchronization',
                                    repservice,
                                    Operation=self._getnum(19, '16'),
                                    Synchronization=sync_name)
@@ -651,8 +652,8 @@ class EMCSMISCommon():
                   'initiator': initiators})
 
         if lunmask_ctrl is None:
-            rc, controller = self.conn.InvokeMethod(
-                'ExposePaths',
+            rc, controller =\
+                self.conn.InvokeMethod('ExposePaths',
                                        configservice, LUNames=[lun_name],
                                        InitiatorPortIDs=initiators,
                                        DeviceAccesses=[self._getnum(2, '16')])
@@ -661,8 +662,8 @@ class EMCSMISCommon():
                         'LunMaskingSCSIProtocolController: '
                         '%(lunmasking)s')
                       % {'lunmasking': str(lunmask_ctrl)})
-            rc, controller = self.conn.InvokeMethod(
-                'ExposePaths',
+            rc, controller =\
+                self.conn.InvokeMethod('ExposePaths',
                                        configservice, LUNames=[lun_name],
                                        DeviceAccesses=[self._getnum(2, '16')],
                                        ProtocolControllers=[lunmask_ctrl])
@@ -724,9 +725,11 @@ class EMCSMISCommon():
                   'masking_group': str(masking_group),
                   'vol': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-            'AddMembers', configservice,
-            MaskingGroup=masking_group, Members=[vol_instance.path])
+        rc, job =\
+            self.conn.InvokeMethod('AddMembers',
+                                   configservice,
+                                   MaskingGroup=masking_group,
+                                   Members=[vol_instance.path])
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -864,7 +867,7 @@ class EMCSMISCommon():
 
     def _get_storage_type(self, filename=None):
         """Get the storage type from the config file."""
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -884,7 +887,7 @@ class EMCSMISCommon():
         raise exception.VolumeBackendAPIException(data=exception_message)
 
     def _get_masking_view(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -902,7 +905,7 @@ class EMCSMISCommon():
         return None
 
     def _get_ecom_cred(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -924,7 +927,7 @@ class EMCSMISCommon():
         return None
 
     def _get_ecom_server(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
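The four hunks above replace "== None" with "is None". Comparing against None with "==" is what pep8 flags as E711, and it can be fooled by a custom __eq__, whereas "is" tests object identity and cannot be overridden. A short, runnable demonstration (Python 2 era, but valid in both major versions):

class AlwaysEqual(object):
    """An object whose __eq__ claims equality with everything."""
    def __eq__(self, other):
        return True

obj = AlwaysEqual()
print(obj == None)  # True -- a lying __eq__ defeats the equality test
print(obj is None)  # False -- identity comparison cannot be overridden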
@@ -1219,8 +1222,8 @@ class EMCSMISCommon():
         for ctrl in controllers:
             if storage_system != ctrl['SystemName']:
                 continue
-            associators = self.conn.Associators(
-                ctrl,
+            associators =\
+                self.conn.Associators(ctrl,
                                       resultClass='EMC_StorageHardwareID')
             for assoc in associators:
                 # if EMC_StorageHardwareID matches the initiator,
@@ -1253,12 +1256,14 @@ class EMCSMISCommon():
                                            connector):
         foundCtrl = None
         initiators = self._find_initiator_names(connector)
-        controllers = self.conn.AssociatorNames(
+        controllers =\
+            self.conn.AssociatorNames(
                 vol_instance.path,
                 resultClass='EMC_LunMaskingSCSIProtocolController')
 
         for ctrl in controllers:
-            associators = self.conn.Associators(
+            associators =\
+                self.conn.Associators(
                     ctrl,
                     resultClass='EMC_StorageHardwareID')
             for assoc in associators:
@@ -35,8 +35,8 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
     def __init__(self, *args, **kwargs):
 
         super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
-        self.common = emc_smis_common.EMCSMISCommon(
-            'iSCSI',
+        self.common =\
+            emc_smis_common.EMCSMISCommon('iSCSI',
                                           configuration=self.configuration)
 
     def check_for_setup_error(self):