Remove E12 errors from tox.ini Flake ignores.
This removes E12 from the flake8 ignores list and fixes up the existing pep8 errors that we were ignoring.

Change-Id: I5d60f1eed768fcae01a708fcf9ea324844c6376d
commit b7ceb409ec
parent dac54e709a
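The tox.ini hunk itself is not part of the rendered diff below, which shows only the resulting pep8 fixes. The pep8 E12x codes (E121 through E128) all flag continuation-line indentation: lines that are under- or over-indented relative to the opening bracket or the hanging indent of the statement they continue. As a rough sketch, the tox.ini edit presumably looks like the following (the retained codes are an assumption; only the removal of E12 is certain from the commit message):

@@ tox.ini, [flake8] section (hypothetical reconstruction) @@
 [flake8]
-ignore = E12,<other retained codes>
+ignore = <other retained codes>

Every hunk that follows has the same shape: continuation lines that violated E12x, replaced by lines re-aligned under the opening parenthesis, re-indented to a four-space hanging indent, or split with a trailing backslash. A few related cleanups (such as `== None` to `is None`) are folded in as well.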
@@ -34,8 +34,9 @@ import os
 import sys
 
 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
-    sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
 if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
     sys.path.insert(0, possible_topdir)
 
@@ -30,8 +30,9 @@ import os
 import sys
 
 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
-    sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
 if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
     sys.path.insert(0, possible_topdir)
 
@@ -691,13 +691,12 @@ class ServiceCommands(object):
        ctxt = context.get_admin_context()
        services = db.service_get_all(ctxt)
        print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
-        print print_format % (
-            _('Binary'),
-            _('Host'),
-            _('Zone'),
-            _('Status'),
-            _('State'),
-            _('Updated At'))
+        print print_format % (_('Binary'),
+                              _('Host'),
+                              _('Zone'),
+                              _('Status'),
+                              _('State'),
+                              _('Updated At'))
        for svc in services:
            alive = utils.service_is_up(svc)
            art = ":-)" if alive else "XXX"
@@ -51,9 +51,8 @@ if __name__ == '__main__':
     if FLAGS.enabled_backends:
         for backend in FLAGS.enabled_backends:
             host = "%s@%s" % (FLAGS.host, backend)
-            server = service.Service.create(
-                host=host,
-                service_name=backend)
+            server = service.Service.create(host=host,
+                                            service_name=backend)
             launcher.launch_server(server)
     else:
         server = service.Service.create(binary='cinder-volume')
@@ -80,8 +80,8 @@ if __name__ == '__main__':
     print _("Found %d volumes") % len(volumes)
     for volume_ref in volumes:
         try:
-            cinder.volume.utils.notify_usage_exists(
-                admin_context, volume_ref)
+            cinder.volume.utils.notify_usage_exists(admin_context,
+                                                    volume_ref)
         except Exception, e:
             print traceback.format_exc(e)
 
@@ -161,8 +161,9 @@ class VolumeTransferController(wsgi.Controller):
 
         name = transfer.get('name', None)
 
-        LOG.audit(_("Creating transfer of volume %(volume_id)s"), locals(),
-                  context=context)
+        LOG.audit(_("Creating transfer of volume %s"),
+                  volume_id,
+                  context=context)
 
         try:
             new_transfer = self.transfer_api.create(context, volume_id, name)
@@ -194,8 +195,8 @@ class VolumeTransferController(wsgi.Controller):
             msg = _("Incorrect request body format")
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.audit(_("Accepting transfer %(transfer_id)s"), locals(),
-                  context=context)
+        LOG.audit(_("Accepting transfer %s"), transfer_id,
+                  context=context)
 
         try:
             accepted_transfer = self.transfer_api.accept(context, transfer_id,
@@ -206,8 +207,9 @@ class VolumeTransferController(wsgi.Controller):
         except exception.InvalidVolume as error:
             raise exc.HTTPBadRequest(explanation=unicode(error))
 
-        transfer = self._view_builder.summary(req,
-                dict(accepted_transfer.iteritems()))
+        transfer = \
+            self._view_builder.summary(req,
+                                       dict(accepted_transfer.iteritems()))
         return transfer
 
     def delete(self, req, id):
@@ -33,7 +33,7 @@ from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
 SCHEDULER_HINTS_NAMESPACE =\
-"http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
+    "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
 FLAGS = flags.FLAGS
 
 
@@ -94,8 +94,10 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
 
     def _extract_scheduler_hints(self, volume_node):
         """Marshal the scheduler hints attribute of a parsed request."""
-        node = self.find_first_child_named_in_namespace(volume_node,
-                SCHEDULER_HINTS_NAMESPACE, "scheduler_hints")
+        node =\
+            self.find_first_child_named_in_namespace(volume_node,
+                                                     SCHEDULER_HINTS_NAMESPACE,
+                                                     "scheduler_hints")
         if node:
             scheduler_hints = {}
             for child in self.extract_elements(node):
@@ -71,7 +71,7 @@ class ViewBuilder(object):
         # check for existing key
         for limit in limits:
             if (limit["uri"] == rate_limit["URI"] and
-                limit["regex"] == rate_limit["regex"]):
+                    limit["regex"] == rate_limit["regex"]):
                 _rate_limit_key = limit
                 break
 
@@ -79,8 +79,8 @@ class ViewBuilder(common.ViewBuilder):
         transfers_list = [func(request, transfer)['transfer'] for transfer in
                           transfers]
         transfers_links = self._get_collection_links(request,
-                transfers,
-                self._collection_name)
+                                                     transfers,
+                                                     self._collection_name)
         transfers_dict = dict(transfers=transfers_list)
 
         if transfers_links:
@@ -20,4 +20,4 @@ import cinder.flags
 import cinder.openstack.common.importutils
 
 API = cinder.openstack.common.importutils.import_class(
-        cinder.flags.FLAGS.backup_api_class)
+    cinder.flags.FLAGS.backup_api_class)
@@ -2028,7 +2028,7 @@ def backup_destroy(context, backup_id):
 @require_context
 def transfer_get(context, transfer_id, session=None):
     query = model_query(context, models.Transfer,
-                       session=session).\
+                        session=session).\
                        filter_by(id=transfer_id)
 
     if not is_admin_context(context):
@@ -2069,8 +2069,8 @@ def transfer_get_all_by_project(context, project_id):
 
     volume = models.Volume
     query = model_query(context, models.Transfer).\
-            options(joinedload('volume')).\
-            filter(volume.project_id == project_id)
+        options(joinedload('volume')).\
+        filter(volume.project_id == project_id)
     results = query.all()
     return _translate_transfers(results)
 
@@ -51,7 +51,7 @@ def patched_with_engine(f, *a, **kw):
 # on that version or higher, this can be removed
 MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
 if (not hasattr(migrate, '__version__') or
-    dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
+        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
     migrate_util.with_engine = patched_with_engine
 
 
@@ -251,7 +251,7 @@ class Reservation(BASE, CinderBase):
         "QuotaUsage",
         foreign_keys=usage_id,
         primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
-        'QuotaUsage.deleted == 0)')
+                    'QuotaUsage.deleted == 0)')
 
 
 class Snapshot(BASE, CinderBase):
@@ -33,9 +33,9 @@ from cinder.openstack.common import timeutils
 
 
 scheduler_json_config_location_opt = cfg.StrOpt(
-        'scheduler_json_config_location',
-        default='',
-        help='Absolute path to scheduler configuration JSON file.')
+    'scheduler_json_config_location',
+    default='',
+    help='Absolute path to scheduler configuration JSON file.')
 
 
 CONF = cfg.CONF
@@ -97,7 +97,7 @@ class SchedulerOptions(object):
 
         last_modified = self._get_file_timestamp(filename)
         if (not last_modified or not self.last_modified or
-            last_modified > self.last_modified):
+                last_modified > self.last_modified):
             self.data = self._load_file(self._get_file_handle(filename))
             self.last_modified = last_modified
         if not self.data:
@@ -277,7 +277,7 @@ class BackupsAPITestCase(test.TestCase):
 
         self.assertEqual(backup_detail.item(0).attributes.length, 11)
         self.assertEqual(
-                backup_detail.item(0).getAttribute('availability_zone'), 'az1')
+            backup_detail.item(0).getAttribute('availability_zone'), 'az1')
         self.assertEqual(
             backup_detail.item(0).getAttribute('container'), 'volumebackups')
         self.assertEqual(
@@ -288,17 +288,17 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(
             backup_detail.item(0).getAttribute('id'), backup_id1)
         self.assertEqual(
-                int(backup_detail.item(0).getAttribute('object_count')), 0)
+            int(backup_detail.item(0).getAttribute('object_count')), 0)
         self.assertEqual(
-                int(backup_detail.item(0).getAttribute('size')), 0)
+            int(backup_detail.item(0).getAttribute('size')), 0)
         self.assertEqual(
             backup_detail.item(0).getAttribute('status'), 'creating')
         self.assertEqual(
-                int(backup_detail.item(0).getAttribute('volume_id')), 1)
+            int(backup_detail.item(0).getAttribute('volume_id')), 1)
 
         self.assertEqual(backup_detail.item(1).attributes.length, 11)
         self.assertEqual(
-                backup_detail.item(1).getAttribute('availability_zone'), 'az1')
+            backup_detail.item(1).getAttribute('availability_zone'), 'az1')
         self.assertEqual(
             backup_detail.item(1).getAttribute('container'), 'volumebackups')
         self.assertEqual(
@@ -330,13 +330,13 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(
             backup_detail.item(2).getAttribute('id'), backup_id3)
         self.assertEqual(
-                int(backup_detail.item(2).getAttribute('object_count')), 0)
+            int(backup_detail.item(2).getAttribute('object_count')), 0)
         self.assertEqual(
-                int(backup_detail.item(2).getAttribute('size')), 0)
+            int(backup_detail.item(2).getAttribute('size')), 0)
         self.assertEqual(
             backup_detail.item(2).getAttribute('status'), 'creating')
         self.assertEqual(
-                int(backup_detail.item(2).getAttribute('volume_id')), 1)
+            int(backup_detail.item(2).getAttribute('volume_id')), 1)
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
         db.backup_destroy(context.get_admin_context(), backup_id2)
@@ -33,7 +33,7 @@ class SchedulerHintsTestCase(test.TestCase):
         super(SchedulerHintsTestCase, self).setUp()
         self.fake_instance = stubs.stub_volume(1, uuid=UUID)
         self.fake_instance['created_at'] =\
-                datetime.datetime(2013, 1, 1, 1, 1, 1)
+            datetime.datetime(2013, 1, 1, 1, 1, 1)
         self.flags(
             osapi_volume_extension=[
                 'cinder.api.contrib.select_extensions'],
@@ -55,8 +55,7 @@ class SchedulerHintsTestCase(test.TestCase):
         req.content_type = 'application/json'
         body = {'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
-                'volume_id': '1',
-                }
+                'volume_id': '1', }
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
         self.assertEqual(202, res.status_int)
@@ -78,8 +77,7 @@ class SchedulerHintsTestCase(test.TestCase):
         body = {'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'volume_id': '1',
-                'scheduler_hints': {'a': 'b'},
-                }
+                'scheduler_hints': {'a': 'b'}, }
 
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
@@ -90,11 +88,10 @@ class SchedulerHintsTestCase(test.TestCase):
         req.method = 'POST'
         req.content_type = 'application/json'
         body = {'volume': {
-            'id': id,
-            'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
-            'volume_id': '1',
-            'scheduler_hints': 'a', }
-        }
+            'id': id,
+            'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+            'volume_id': '1',
+            'scheduler_hints': 'a', }}
 
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
@@ -51,12 +51,11 @@ def delete_volume_type_extra_specs_not_found(context, volume_type_id, key):
 
 
 def stub_volume_type_extra_specs():
-    specs = {
-        "key1": "value1",
-        "key2": "value2",
-        "key3": "value3",
-        "key4": "value4",
-        "key5": "value5"}
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
     return specs
 
 
@@ -25,12 +25,11 @@ from cinder.volume import volume_types
 
 
 def stub_volume_type(id):
-    specs = {
-        "key1": "value1",
-        "key2": "value2",
-        "key3": "value3",
-        "key4": "value4",
-        "key5": "value5"}
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
     return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
 
 
@@ -543,9 +543,9 @@ class VolumeTransferAPITestCase(test.TestCase):
     def test_accept_transfer_with_VolumeLimitExceeded(self):
 
         def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls,
-                context,
-                transfer,
-                volume_id):
+                                                                  context,
+                                                                  transfer,
+                                                                  volume_id):
             raise exception.VolumeLimitExceeded(allowed=1)
 
         self.stubs.Set(cinder.transfer.API, 'accept',
@@ -181,7 +181,7 @@ class PaginationParamsTest(test.TestCase):
     def test_valid_marker(self):
         """ Test valid marker param. """
         req = webob.Request.blank(
-                '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
+            '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
         self.assertEqual(common.get_pagination_params(req),
                          {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
 
@@ -26,12 +26,11 @@ from cinder.volume import volume_types
 
 
 def stub_volume_type(id):
-    specs = {
-        "key1": "value1",
-        "key2": "value2",
-        "key3": "value3",
-        "key4": "value4",
-        "key5": "value5"}
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
     return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
 
 
@@ -77,7 +77,7 @@ class TransfersTableTestCase(test.TestCase):
         self.assertEquals(xfer.volume_id, volume_id1, "Unexpected volume_id")
 
         nctxt = context.RequestContext(user_id='new_user_id',
-                project_id='new_project_id')
+                                       project_id='new_project_id')
         self.assertRaises(exception.TransferNotFound,
                           db.transfer_get, nctxt, xfer_id1)
 
@@ -102,7 +102,7 @@ class TransfersTableTestCase(test.TestCase):
                           "Unexpected number of transfer records")
 
         nctxt = context.RequestContext(user_id='new_user_id',
-                project_id='new_project_id')
+                                       project_id='new_project_id')
         self.assertRaises(exception.NotAuthorized,
                           db.transfer_get_all_by_project,
                           nctxt, self.ctxt.project_id)
@@ -116,8 +116,8 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs())
 
         create_actions = fake_driver.LoggingVolumeDriver.logs_like(
-                'create_volume',
-                id=created_volume_id)
+            'create_volume',
+            id=created_volume_id)
         LOG.debug("Create_Actions: %s" % create_actions)
 
         self.assertEquals(1, len(create_actions))
@@ -127,16 +127,16 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         self.assertEquals(create_action['size'], 1)
 
         export_actions = fake_driver.LoggingVolumeDriver.logs_like(
-                'create_export',
-                id=created_volume_id)
+            'create_export',
+            id=created_volume_id)
         self.assertEquals(1, len(export_actions))
         export_action = export_actions[0]
         self.assertEquals(export_action['id'], created_volume_id)
         self.assertEquals(export_action['availability_zone'], 'nova')
 
         delete_actions = fake_driver.LoggingVolumeDriver.logs_like(
-                'delete_volume',
-                id=created_volume_id)
+            'delete_volume',
+            id=created_volume_id)
         self.assertEquals(1, len(delete_actions))
         delete_action = export_actions[0]
         self.assertEquals(delete_action['id'], created_volume_id)
@@ -62,8 +62,9 @@ class HostFiltersTestCase(test.TestCase):
         stub_out_https_backend(self.stubs)
         self.context = context.RequestContext('fake', 'fake')
         self.json_query = jsonutils.dumps(
-            ['and', ['>=', '$free_capacity_gb', 1024],
-                ['>=', '$total_capacity_gb', 10 * 1024]])
+            ['and',
+             ['>=', '$free_capacity_gb', 1024],
+             ['>=', '$total_capacity_gb', 10 * 1024]])
         # This has a side effect of testing 'get_filter_classes'
         # when specifying a method (in this case, our standard filters)
         filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
@@ -79,7 +79,7 @@ class HostManagerTestCase(test.TestCase):
 
         self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
         self.host_manager._choose_host_filters(specified_filters).AndReturn(
-                [FakeFilterClass1])
+            [FakeFilterClass1])
 
     def _verify_result(self, info, result):
         for x in info['got_fprops']:
@@ -45,12 +45,13 @@ def _quota_reserve(context, project_id):
     for i in range(3):
         resource = 'res%d' % i
         quotas[resource] = db.quota_create(context, project_id, resource, i)
-        resources[resource] = ReservableResource(resource,
-                get_sync(resource, i), 'quota_res_%d' % i)
+        resources[resource] = ReservableResource(
+            resource,
+            get_sync(resource, i), 'quota_res_%d' % i)
         deltas[resource] = i
     return db.quota_reserve(context, resources, quotas, deltas,
-            datetime.utcnow(), datetime.utcnow(),
-            timedelta(days=1), project_id)
+                            datetime.utcnow(), datetime.utcnow(),
+                            timedelta(days=1), project_id)
 
 
 class ModelsObjectComparatorMixin(object):
@@ -58,7 +59,7 @@ class ModelsObjectComparatorMixin(object):
         if ignored_keys is None:
             ignored_keys = []
         return dict([(k, v) for k, v in obj.iteritems()
-                if k not in ignored_keys])
+                     if k not in ignored_keys])
 
     def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
         obj1 = self._dict_from_object(obj1, ignored_keys)
@@ -255,7 +256,7 @@ class DBAPIVolumeTestCase(BaseTest):
         db.iscsi_target_create_safe(self.ctxt, {'host': host,
                                                 'target_num': 42})
         target_num = db.volume_allocate_iscsi_target(self.ctxt, volume['id'],
-                host)
+                                                     host)
         self.assertEqual(target_num, 42)
 
     def test_volume_attached_invalid_uuid(self):
@@ -278,8 +279,9 @@ class DBAPIVolumeTestCase(BaseTest):
             for j in xrange(3):
                 db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
         for i in xrange(3):
-            self.assertEqual((3, 300), db.volume_data_get_for_host(
-                self.ctxt, 'h%d' % i))
+            self.assertEqual((3, 300),
+                             db.volume_data_get_for_host(
+                                 self.ctxt, 'h%d' % i))
 
     def test_volume_data_get_for_project(self):
         for i in xrange(3):
@@ -289,13 +291,15 @@ class DBAPIVolumeTestCase(BaseTest):
                 'host': 'h-%d-%d' % (i, j),
             })
         for i in xrange(3):
-            self.assertEqual((3, 300), db.volume_data_get_for_project(
-                self.ctxt, 'p%d' % i))
+            self.assertEqual((3, 300),
+                             db.volume_data_get_for_project(
+                                 self.ctxt, 'p%d' % i))
 
     def test_volume_detached(self):
         volume = db.volume_create(self.ctxt, {})
-        db.volume_attached(self.ctxt, volume['id'],
-                'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
+        db.volume_attached(self.ctxt,
+                           volume['id'],
+                           'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
         db.volume_detached(self.ctxt, volume['id'])
         volume = db.volume_get(self.ctxt, volume['id'])
         self.assertEqual('available', volume['status'])
@@ -306,7 +310,7 @@ class DBAPIVolumeTestCase(BaseTest):
     def test_volume_get(self):
         volume = db.volume_create(self.ctxt, {})
         self._assertEqualObjects(volume, db.volume_get(self.ctxt,
-                volume['id']))
+                                                       volume['id']))
 
     def test_volume_destroy(self):
         volume = db.volume_create(self.ctxt, {})
@@ -315,8 +319,9 @@ class DBAPIVolumeTestCase(BaseTest):
                           self.ctxt, volume['id'])
 
     def test_volume_get_all(self):
-        volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i})
-                for i in xrange(3)]
+        volumes = [db.volume_create(self.ctxt,
+                                    {'host': 'h%d' % i, 'size': i})
+                   for i in xrange(3)]
         self._assertEqualListsOfObjects(volumes, db.volume_get_all(
             self.ctxt, None, None, 'host', None))
 
@@ -324,7 +329,7 @@ class DBAPIVolumeTestCase(BaseTest):
         volumes = []
         for i in xrange(3):
             volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
-                    for j in xrange(3)])
+                            for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_host(
@@ -337,7 +342,8 @@ class DBAPIVolumeTestCase(BaseTest):
             instance_uuid = str(uuidutils.uuid.uuid1())
             instance_uuids.append(instance_uuid)
             volumes.append([db.volume_create(self.ctxt,
-                {'instance_uuid': instance_uuid}) for j in xrange(3)])
+                                             {'instance_uuid': instance_uuid})
+                            for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_instance_uuid(
@@ -347,7 +353,7 @@ class DBAPIVolumeTestCase(BaseTest):
         volumes = []
         for i in xrange(3):
             volumes.append([db.volume_create(self.ctxt, {
-                    'project_id': 'p%d' % i}) for j in xrange(3)])
+                'project_id': 'p%d' % i}) for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_project(
@@ -361,7 +367,7 @@ class DBAPIVolumeTestCase(BaseTest):
 
     def test_volume_get_iscsi_target_num_nonexistent(self):
         self.assertRaises(exception.ISCSITargetNotFoundForVolume,
-                db.volume_get_iscsi_target_num, self.ctxt, 42)
+                          db.volume_get_iscsi_target_num, self.ctxt, 42)
 
     def test_volume_update(self):
         volume = db.volume_create(self.ctxt, {'host': 'h1'})
@@ -381,19 +387,19 @@ class DBAPIReservationTestCase(BaseTest):
     def setUp(self):
         super(DBAPIReservationTestCase, self).setUp()
         self.values = {'uuid': 'sample-uuid',
-                'project_id': 'project1',
-                'resource': 'resource',
-                'delta': 42,
-                'expire': datetime.utcnow() + timedelta(days=1),
-                'usage': {'id': 1}}
+                       'project_id': 'project1',
+                       'resource': 'resource',
+                       'delta': 42,
+                       'expire': datetime.utcnow() + timedelta(days=1),
+                       'usage': {'id': 1}}
 
     def test_reservation_create(self):
         reservation = db.reservation_create(self.ctxt, **self.values)
         self._assertEqualObjects(self.values, reservation, ignored_keys=(
-                'deleted', 'updated_at',
-                'deleted_at', 'id',
-                'created_at', 'usage',
-                'usage_id'))
+            'deleted', 'updated_at',
+            'deleted_at', 'id',
+            'created_at', 'usage',
+            'usage_id'))
         self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
 
     def test_reservation_get(self):
@@ -402,46 +408,59 @@ class DBAPIReservationTestCase(BaseTest):
         self._assertEqualObjects(reservation, reservation_db)
 
     def test_reservation_get_nonexistent(self):
-        self.assertRaises(exception.ReservationNotFound, db.reservation_get,
-                          self.ctxt, 'non-exitent-resevation-uuid')
+        self.assertRaises(exception.ReservationNotFound,
+                          db.reservation_get,
+                          self.ctxt,
+                          'non-exitent-resevation-uuid')
 
     def test_reservation_commit(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 1, 'in_use': 1},
-                'res2': {'reserved': 2, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 1, 'in_use': 1},
+                    'res2': {'reserved': 2, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt, 'project1'))
         db.reservation_get(self.ctxt, reservations[0])
         db.reservation_commit(self.ctxt, reservations, 'project1')
         self.assertRaises(exception.ReservationNotFound,
-                          db.reservation_get, self.ctxt, reservations[0])
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 0, 'in_use': 2},
-                'res2': {'reserved': 0, 'in_use': 4}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 0, 'in_use': 2},
+                    'res2': {'reserved': 0, 'in_use': 4}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
     def test_reservation_rollback(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 1, 'in_use': 1},
-                'res2': {'reserved': 2, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 1, 'in_use': 1},
+                    'res2': {'reserved': 2, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
         db.reservation_get(self.ctxt, reservations[0])
         db.reservation_rollback(self.ctxt, reservations, 'project1')
         self.assertRaises(exception.ReservationNotFound,
-                          db.reservation_get, self.ctxt, reservations[0])
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 0, 'in_use': 1},
-                'res2': {'reserved': 0, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 0, 'in_use': 1},
+                    'res2': {'reserved': 0, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
     def test_reservation_expire(self):
         self.values['expire'] = datetime.utcnow() + timedelta(days=1)
@@ -449,11 +468,13 @@ class DBAPIReservationTestCase(BaseTest):
         db.reservation_expire(self.ctxt)
 
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 0, 'in_use': 1},
-                'res2': {'reserved': 0, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 0, 'in_use': 1},
+                    'res2': {'reserved': 0, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
 
 class DBAPIQuotaTestCase(BaseTest):
@@ -478,9 +499,9 @@ class DBAPIQuotaTestCase(BaseTest):
         for i in range(3):
             quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
             self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
-                'res0': 0,
-                'res1': 1,
-                'res2': 2})
+                                         'res0': 0,
+                                         'res1': 1,
+                                         'res2': 2})
 
     def test_quota_update(self):
         db.quota_create(self.ctxt, 'project1', 'resource1', 41)
@@ -492,11 +513,18 @@ class DBAPIQuotaTestCase(BaseTest):
 
     def test_quota_update_nonexistent(self):
         self.assertRaises(exception.ProjectQuotaNotFound,
-                db.quota_update, self.ctxt, 'project1', 'resource1', 42)
+                          db.quota_update,
+                          self.ctxt,
+                          'project1',
+                          'resource1',
+                          42)
 
     def test_quota_get_nonexistent(self):
         self.assertRaises(exception.ProjectQuotaNotFound,
-                db.quota_get, self.ctxt, 'project1', 'resource1')
+                          db.quota_get,
+                          self.ctxt,
+                          'project1',
+                          'resource1')
 
     def test_quota_reserve(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
@@ -511,17 +539,22 @@ class DBAPIQuotaTestCase(BaseTest):
         reservations = _quota_reserve(self.ctxt, 'project1')
         db.quota_destroy_all_by_project(self.ctxt, 'project1')
         self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
-                {'project_id': 'project1'})
-        self.assertEqual(db.quota_usage_get_all_by_project(
-            self.ctxt, 'project1'),
-            {'project_id': 'project1'})
+                         {'project_id': 'project1'})
+        self.assertEqual(db.quota_usage_get_all_by_project(self.ctxt,
+                                                           'project1'),
+                         {'project_id': 'project1'})
         for r in reservations:
             self.assertRaises(exception.ReservationNotFound,
-                    db.reservation_get, self.ctxt, r)
+                              db.reservation_get,
+                              self.ctxt,
+                              r)
 
     def test_quota_usage_get_nonexistent(self):
-        self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
-                          self.ctxt, 'p1', 'nonexitent_resource')
+        self.assertRaises(exception.QuotaUsageNotFound,
+                          db.quota_usage_get,
+                          self.ctxt,
+                          'p1',
+                          'nonexitent_resource')
 
     def test_quota_usage_get(self):
         reservations = _quota_reserve(self.ctxt, 'p1')
@@ -348,7 +348,7 @@ class FakeEcomConnection():
         syncs = self._enum_syncsvsvs()
         for sync in syncs:
             if (sync['SyncedElement'] == objectpath['SyncedElement'] and
-                sync['SystemElement'] == objectpath['SystemElement']):
+                    sync['SystemElement'] == objectpath['SystemElement']):
                 foundsync = sync
                 break
         return foundsync
@@ -71,8 +71,8 @@ class GlusterFsDriverTestCase(test.TestCase):
         self._configuration.glusterfs_sparsed_volumes = True
 
         self.stubs = stubout.StubOutForTesting()
-        self._driver = glusterfs.GlusterfsDriver(
-            configuration=self._configuration)
+        self._driver =\
+            glusterfs.GlusterfsDriver(configuration=self._configuration)
         self._driver.shares = {}
 
     def tearDown(self):
@@ -1074,7 +1074,7 @@ ISCSI_PORT_RET = (
 ISCSI_3PAR_RET = (
     'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n'
     '75,fakehost.foo,Generic,iqn.1993-08.org.debian:01:222,---,'
-        '10.10.222.12\r\n'
+    '10.10.222.12\r\n'
     '\r\n'
     'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n'
     '75,fakehost.foo,--,--\r\n'
@@ -186,5 +186,5 @@ class LioAdmTestCase(test.TestCase, TargetAdminTestCase):
         self.flags(iscsi_helper='lioadm')
         self.script_template = "\n".join([
             'rtstool create '
-                '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass',
+            '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass',
             'rtstool delete iqn.2010-10.org.openstack:volume-blaa'])
@@ -430,21 +430,24 @@ class NfsDriverTestCase(test.TestCase):
         drv = self._driver
         self.configuration.nfs_oversub_ratio = -1
         self.assertRaises(exception.NfsException,
-                          drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
         """do_setup should throw error if nfs_used_ratio is less than 0."""
         drv = self._driver
         self.configuration.nfs_used_ratio = -1
         self.assertRaises(exception.NfsException,
-                          drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
         """do_setup should throw error if nfs_used_ratio is greater than 1."""
         drv = self._driver
         self.configuration.nfs_used_ratio = 2
         self.assertRaises(exception.NfsException,
-                          drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
         """do_setup should throw error if nfs client is not installed."""
@@ -171,7 +171,7 @@ class DefaultPolicyTestCase(test.TestCase):
 
     def _set_brain(self, default_rule):
         brain = cinder.openstack.common.policy.Brain(self.rules,
-                default_rule)
+                                                     default_rule)
         cinder.openstack.common.policy.set_brain(brain)
 
     def tearDown(self):
@@ -350,8 +350,7 @@ class RBDTestCase(test.TestCase):
                 'auth_enabled': False,
                 'auth_username': None,
                 'secret_type': 'ceph',
-                'secret_uuid': None,
-                }
+                'secret_uuid': None, }
         }
         actual = self.driver.initialize_connection(dict(name=name), None)
         self.assertDictMatch(expected, actual)
@@ -141,7 +141,7 @@ class StorwizeSVCManagementSimulator:
 
     def _state_transition(self, function, fcmap):
         if (function == 'wait' and
-            'wait' not in self._transitions[fcmap['status']]):
+                'wait' not in self._transitions[fcmap['status']]):
             return ('', '')
 
         if fcmap['status'] == 'copying' and function == 'wait':
@@ -1239,7 +1239,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         self.USESIM = True
         if self.USESIM:
             self.driver = StorwizeSVCFakeDriver(
-                    configuration=conf.Configuration(None))
+                configuration=conf.Configuration(None))
             self._def_flags = {'san_ip': 'hostname',
                                'san_login': 'user',
                                'san_password': 'pass',
@@ -1250,8 +1250,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
             self._host_name = 'storwize-svc-test'
             self._host_ip = '1.234.56.78'
             self._host_wwpns = [
-                    str(random.randint(0, 9999999999999999)).zfill(16),
-                    str(random.randint(0, 9999999999999999)).zfill(16)]
+                str(random.randint(0, 9999999999999999)).zfill(16),
+                str(random.randint(0, 9999999999999999)).zfill(16)]
             self._iscsi_name = ('test.initiator.%s' %
                                 str(random.randint(10000, 99999)))
             self.sim = StorwizeSVCManagementSimulator('volpool')
@@ -1259,7 +1259,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
             self.driver.set_fake_storage(self.sim)
         else:
             self.driver = storwize_svc.StorwizeSVCDriver(
-                    configuration=conf.Configuration(None))
+                configuration=conf.Configuration(None))
             self._def_flags = {'san_ip': '1.111.11.11',
                                'san_login': 'user',
                                'san_password': 'password',
@@ -1278,7 +1278,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         for line in lines:
             val = line.split('=')
             if (len(val) == 2 and
-                val[0].strip().replace(" ", "") == 'port_name'):
+                    val[0].strip().replace(" ", "") == 'port_name'):
                 self._host_wwpns.append(val[1].strip()[3:-1])
         self.assertNotEqual(len(self._host_wwpns), 0)
 
@@ -108,9 +108,9 @@ class VolumeTransferTestCase(test.TestCase):
                           'Unexpected user id')
 
         self.assertEquals(volume['id'], response['volume_id'],
-                'Unexpected volume id in response.')
+                          'Unexpected volume id in response.')
         self.assertEquals(transfer['id'], response['id'],
-                'Unexpected transfer id in response.')
+                          'Unexpected transfer id in response.')
 
     def test_transfer_get(self):
         tx_api = transfer_api.API()
@@ -20,4 +20,4 @@ import cinder.flags
 import cinder.openstack.common.importutils
 
 API = cinder.openstack.common.importutils.import_class(
-        cinder.flags.FLAGS.transfer_api_class)
+    cinder.flags.FLAGS.transfer_api_class)
@@ -18,8 +18,7 @@
 
 # Importing full names to not pollute the namespace and cause possible
 # collisions with use of 'from cinder.volume import <foo>' elsewhere.
-import cinder.flags
-import cinder.openstack.common.importutils
+import cinder.flags as flags
+import cinder.openstack.common.importutils as import_utils
 
-API = cinder.openstack.common.importutils.import_class(
-    cinder.flags.FLAGS.volume_api_class)
+API = import_utils.import_class(flags.FLAGS.volume_api_class)
@@ -227,7 +227,7 @@ class ISCSIDriver(VolumeDriver):
                                   run_as_root=True)
         for target in out.splitlines():
             if (self.configuration.iscsi_ip_address in target
-                and volume_name in target):
+                    and volume_name in target):
                 return target
         return None
 
@@ -100,7 +100,7 @@ class EMCSMISCommon():
                     'storage_system': storage_system})
 
         configservice = self._find_storage_configuration_service(
-                storage_system)
+            storage_system)
         if configservice is None:
             exception_message = (_("Error Create Volume: %(volumename)s. "
                                    "Storage Configuration Service not found for "
@@ -120,10 +120,10 @@ class EMCSMISCommon():
                     'size': volumesize})
 
         rc, job = self.conn.InvokeMethod(
-                'CreateOrModifyElementFromStoragePool',
-                configservice, ElementName=volumename, InPool=pool,
-                ElementType=self._getnum(5, '16'),
-                Size=self._getnum(volumesize, '64'))
+            'CreateOrModifyElementFromStoragePool',
+            configservice, ElementName=volumename, InPool=pool,
+            ElementType=self._getnum(5, '16'),
+            Size=self._getnum(volumesize, '64'))
 
         LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)lu')
                   % {'volumename': volumename,
@@ -205,10 +205,10 @@ class EMCSMISCommon():
 
         # Create a Clone from snapshot
         rc, job = self.conn.InvokeMethod(
-                'CreateElementReplica', repservice,
-                ElementName=volumename,
-                SyncType=self._getnum(8, '16'),
-                SourceElement=snapshot_instance.path)
+            'CreateElementReplica', repservice,
+            ElementName=volumename,
+            SyncType=self._getnum(8, '16'),
+            SourceElement=snapshot_instance.path)
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -247,13 +247,13 @@ class EMCSMISCommon():
                     'sync_name': str(sync_name)})
 
         rc, job = self.conn.InvokeMethod(
-                'ModifyReplicaSynchronization',
-                repservice,
-                Operation=self._getnum(8, '16'),
-                Synchronization=sync_name)
+            'ModifyReplicaSynchronization',
+            repservice,
+            Operation=self._getnum(8, '16'),
+            Synchronization=sync_name)
 
         LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
-                  'Snapshot: %(snapshotname)s Return code: %(rc)lu')
+                    'Snapshot: %(snapshotname)s Return code: %(rc)lu')
                   % {'volumename': volumename,
                      'snapshotname': snapshotname,
                      'rc': rc})
@@ -329,10 +329,10 @@ class EMCSMISCommon():
 
         # Create a Clone from source volume
         rc, job = self.conn.InvokeMethod(
-                'CreateElementReplica', repservice,
-                ElementName=volumename,
-                SyncType=self._getnum(8, '16'),
-                SourceElement=src_instance.path)
+            'CreateElementReplica', repservice,
+            ElementName=volumename,
+            SyncType=self._getnum(8, '16'),
+            SourceElement=src_instance.path)
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -371,10 +371,10 @@ class EMCSMISCommon():
                     'sync_name': str(sync_name)})
 
         rc, job = self.conn.InvokeMethod(
-                'ModifyReplicaSynchronization',
-                repservice,
-                Operation=self._getnum(8, '16'),
-                Synchronization=sync_name)
+            'ModifyReplicaSynchronization',
+            repservice,
+            Operation=self._getnum(8, '16'),
+            Synchronization=sync_name)
 
         LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
                     'Source Volume: %(srcname)s Return code: %(rc)lu')
@@ -422,8 +422,8 @@ class EMCSMISCommon():
 
         storage_system = vol_instance['SystemName']
 
-        configservice = self._find_storage_configuration_service(
-            storage_system)
+        configservice =\
+            self._find_storage_configuration_service(storage_system)
         if configservice is None:
             exception_message = (_("Error Delete Volume: %(volumename)s. "
                                    "Storage Configuration Service not found.")
@@ -443,9 +443,10 @@ class EMCSMISCommon():
                     'name': volumename,
                     'vol_instance': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-            'EMCReturnToStoragePool',
-            configservice, TheElements=[vol_instance.path])
+        rc, job =\
+            self.conn.InvokeMethod('EMCReturnToStoragePool',
+                                   configservice,
+                                   TheElements=[vol_instance.path])
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -506,11 +507,11 @@ class EMCSMISCommon():
                     'elementname': snapshotname,
                     'sourceelement': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-            'CreateElementReplica', repservice,
-            ElementName=snapshotname,
-            SyncType=self._getnum(7, '16'),
-            SourceElement=vol_instance.path)
+        rc, job =\
+            self.conn.InvokeMethod('CreateElementReplica', repservice,
+                                   ElementName=snapshotname,
+                                   SyncType=self._getnum(7, '16'),
+                                   SourceElement=vol_instance.path)
 
         LOG.debug(_('Create Snapshot: Volume: %(volumename)s '
                     'Snapshot: %(snapshotname)s Return code: %(rc)lu')
@@ -550,8 +551,8 @@ class EMCSMISCommon():
                   % {'snapshot': snapshotname,
                      'volume': volumename})
 
-        sync_name, storage_system = self._find_storage_sync_sv_sv(
-            snapshotname, volumename, False)
+        sync_name, storage_system =\
+            self._find_storage_sync_sv_sv(snapshotname, volumename, False)
         if sync_name is None:
             LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
                         'not found on the array. No snapshot to delete.')
@@ -578,11 +579,11 @@ class EMCSMISCommon():
                     'service': str(repservice),
                     'sync_name': str(sync_name)})
 
-        rc, job = self.conn.InvokeMethod(
-            'ModifyReplicaSynchronization',
-            repservice,
-            Operation=self._getnum(19, '16'),
-            Synchronization=sync_name)
+        rc, job =\
+            self.conn.InvokeMethod('ModifyReplicaSynchronization',
+                                   repservice,
+                                   Operation=self._getnum(19, '16'),
+                                   Synchronization=sync_name)
 
         LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: '
                     '%(snapshotname)s Return code: %(rc)lu')
@@ -651,21 +652,21 @@ class EMCSMISCommon():
                     'initiator': initiators})
 
         if lunmask_ctrl is None:
-            rc, controller = self.conn.InvokeMethod(
-                'ExposePaths',
-                configservice, LUNames=[lun_name],
-                InitiatorPortIDs=initiators,
-                DeviceAccesses=[self._getnum(2, '16')])
+            rc, controller =\
+                self.conn.InvokeMethod('ExposePaths',
+                                       configservice, LUNames=[lun_name],
+                                       InitiatorPortIDs=initiators,
+                                       DeviceAccesses=[self._getnum(2, '16')])
         else:
             LOG.debug(_('ExposePaths parameter '
                         'LunMaskingSCSIProtocolController: '
                         '%(lunmasking)s')
                       % {'lunmasking': str(lunmask_ctrl)})
-            rc, controller = self.conn.InvokeMethod(
-                'ExposePaths',
-                configservice, LUNames=[lun_name],
-                DeviceAccesses=[self._getnum(2, '16')],
-                ProtocolControllers=[lunmask_ctrl])
+            rc, controller =\
+                self.conn.InvokeMethod('ExposePaths',
+                                       configservice, LUNames=[lun_name],
+                                       DeviceAccesses=[self._getnum(2, '16')],
+                                       ProtocolControllers=[lunmask_ctrl])
 
         if rc != 0L:
             msg = (_('Error mapping volume %s.') % volumename)
@@ -724,9 +725,11 @@ class EMCSMISCommon():
                     'masking_group': str(masking_group),
                     'vol': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-            'AddMembers', configservice,
-            MaskingGroup=masking_group, Members=[vol_instance.path])
+        rc, job =\
+            self.conn.InvokeMethod('AddMembers',
+                                   configservice,
+                                   MaskingGroup=masking_group,
+                                   Members=[vol_instance.path])
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -864,7 +867,7 @@ class EMCSMISCommon():
 
     def _get_storage_type(self, filename=None):
         """Get the storage type from the config file."""
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -884,7 +887,7 @@ class EMCSMISCommon():
         raise exception.VolumeBackendAPIException(data=exception_message)
 
     def _get_masking_view(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -902,7 +905,7 @@ class EMCSMISCommon():
         return None
 
     def _get_ecom_cred(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -924,7 +927,7 @@ class EMCSMISCommon():
         return None
 
     def _get_ecom_server(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -1219,9 +1222,9 @@ class EMCSMISCommon():
         for ctrl in controllers:
             if storage_system != ctrl['SystemName']:
                 continue
-            associators = self.conn.Associators(
-                ctrl,
-                resultClass='EMC_StorageHardwareID')
+            associators =\
+                self.conn.Associators(ctrl,
+                                      resultClass='EMC_StorageHardwareID')
             for assoc in associators:
                 # if EMC_StorageHardwareID matches the initiator,
                 # we found the existing EMC_LunMaskingSCSIProtocolController
@@ -1253,14 +1256,16 @@ class EMCSMISCommon():
                                              connector):
         foundCtrl = None
         initiators = self._find_initiator_names(connector)
-        controllers = self.conn.AssociatorNames(
-            vol_instance.path,
-            resultClass='EMC_LunMaskingSCSIProtocolController')
+        controllers =\
+            self.conn.AssociatorNames(
+                vol_instance.path,
+                resultClass='EMC_LunMaskingSCSIProtocolController')
 
         for ctrl in controllers:
-            associators = self.conn.Associators(
-                ctrl,
-                resultClass='EMC_StorageHardwareID')
+            associators =\
+                self.conn.Associators(
+                    ctrl,
+                    resultClass='EMC_StorageHardwareID')
             for assoc in associators:
                 # if EMC_StorageHardwareID matches the initiator,
                 # we found the existing EMC_LunMaskingSCSIProtocolController
@@ -1369,8 +1374,8 @@ class EMCSMISCommon():
             pass
 
         unitnames = self.conn.ReferenceNames(
-                vol_instance.path,
-                ResultClass='CIM_ProtocolControllerForUnit')
+            vol_instance.path,
+            ResultClass='CIM_ProtocolControllerForUnit')
 
         for unitname in unitnames:
             controller = unitname['Antecedent']
@@ -1450,7 +1455,7 @@ class EMCSMISCommon():
             sp = idarray[2]
 
             if (storage_system == storsystemname and
-                owningsp == sp):
+                    owningsp == sp):
                 foundSystem = system
                 LOG.debug(_("Found Storage Processor System: %s")
                           % (str(system)))
@@ -35,9 +35,9 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
     def __init__(self, *args, **kwargs):
 
         super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
-        self.common = emc_smis_common.EMCSMISCommon(
-            'iSCSI',
-            configuration=self.configuration)
+        self.common =\
+            emc_smis_common.EMCSMISCommon('iSCSI',
+                                          configuration=self.configuration)
 
     def check_for_setup_error(self):
         pass
@@ -195,7 +195,7 @@ class NfsDriver(RemoteFsDriver):
             raise exception.NfsException(msg)
 
         if ((not self.configuration.nfs_used_ratio > 0) and
-            (self.configuration.nfs_used_ratio <= 1)):
+                (self.configuration.nfs_used_ratio <= 1)):
             msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
                     "and <= 1.0: %s") % self.configuration.nfs_used_ratio
             LOG.error(msg)
@@ -165,7 +165,7 @@ class HP3PARCommon(object):
             raise exception.InvalidInput(reason=err)
 
         if ('domain' not in cpg
-            or cpg['domain'] != self.config.hp3par_domain):
+                or cpg['domain'] != self.config.hp3par_domain):
             err = ("CPG's domain '%s' and config option hp3par_domain '%s'"
                    " must be the same" %
                    (cpg['domain'], self.config.hp3par_domain))
@@ -132,16 +132,16 @@ class SanISCSIDriver(ISCSIDriver):
                 greenthread.sleep(random.randint(20, 500) / 100.0)
         try:
             raise exception.ProcessExecutionError(
-                    exit_code=last_exception.exit_code,
-                    stdout=last_exception.stdout,
-                    stderr=last_exception.stderr,
-                    cmd=last_exception.cmd)
+                exit_code=last_exception.exit_code,
+                stdout=last_exception.stdout,
+                stderr=last_exception.stderr,
+                cmd=last_exception.cmd)
         except AttributeError:
             raise exception.ProcessExecutionError(
-                    exit_code=-1,
-                    stdout="",
-                    stderr="Error running SSH command",
-                    cmd=command)
+                exit_code=-1,
+                stdout="",
+                stderr="Error running SSH command",
+                cmd=command)
 
         except Exception as e:
             LOG.error(_("Error running SSH command: %s") % command)
@@ -132,7 +132,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         for num in range(0, 128):
             ch = str(chr(num))
             if (not ch.isalnum() and ch != ' ' and ch != '.'
-                and ch != '-' and ch != '_'):
+                    and ch != '-' and ch != '_'):
                 invalid_ch_in_host = invalid_ch_in_host + ch
         self._string_host_name_filter = string.maketrans(
             invalid_ch_in_host, '-' * len(invalid_ch_in_host))
@@ -465,9 +465,9 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
             # If '!' not found, return the string and two empty strings
             attr_name, foo, attr_val = attr_line.partition('!')
             if (attr_name == 'iscsi_name' and
-                'initiator' in connector and
-                attr_val == connector['initiator']):
-                return host
+                    'initiator' in connector and
+                    attr_val == connector['initiator']):
+                return host
             elif (attr_name == 'WWPN' and
                   'wwpns' in connector and
                   attr_val.lower() in
@@ -1315,7 +1315,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if opts['protocol'] == 'iSCSI':
             # Implemented in base iSCSI class
             return super(StorwizeSVCDriver, self).copy_image_to_volume(
-                    context, volume, image_service, image_id)
+                context, volume, image_service, image_id)
         else:
             raise NotImplementedError()
 
@@ -1324,7 +1324,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if opts['protocol'] == 'iSCSI':
             # Implemented in base iSCSI class
             return super(StorwizeSVCDriver, self).copy_volume_to_image(
-                    context, volume, image_service, image_meta)
+                context, volume, image_service, image_meta)
         else:
             raise NotImplementedError()
 
@@ -68,7 +68,7 @@ def get_all_types(context, inactive=0, search_opts={}):
     def _check_extra_specs_match(vol_type, searchdict):
         for k, v in searchdict.iteritems():
             if (k not in vol_type['extra_specs'].keys()
-                or vol_type['extra_specs'][k] != v):
+                    or vol_type['extra_specs'][k] != v):
                 return False
         return True
 
@@ -140,7 +140,7 @@ def is_key_value_present(volume_type_id, key, value, volume_type=None):
         volume_type = get_volume_type(context.get_admin_context(),
                                       volume_type_id)
     if (volume_type.get('extra_specs') is None or
-        volume_type['extra_specs'].get(key) != value):
+            volume_type['extra_specs'].get(key) != value):
         return False
     else:
         return True