Fix pylint E1101 (no-member) errors
Fix pylint E1101 errors and remove E1101 from the pylint disable list so
these errors cannot be re-introduced in the future. Some of the fixes
involved deleting or commenting out code that was no longer in use and
was causing these errors.

Change-Id: I5c5a7f9e90cc5cb9cf4510f578bb6e31cea1f5ef
Story: 2007082
Task: 41335
Signed-off-by: Bart Wensley <barton.wensley@windriver.com>
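Background on the most common fix pattern below: Python 3 removed the
`message` attribute from `BaseException`, so every `exception.message`
access in this tree is a genuine no-member bug, not a false positive.
A minimal before/after sketch (the `risky_operation` helper is
illustrative only, not part of this repo):

    import logging

    LOG = logging.getLogger(__name__)

    def risky_operation():
        raise RuntimeError("boom")

    try:
        risky_operation()
    except Exception as exception:
        # Before: LOG.error('Error Occurred: %s', exception.message)
        # pylint flags E1101 and Python 3 raises AttributeError, because
        # BaseException no longer defines .message.
        LOG.error('Error Occurred: %s', str(exception))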
@@ -94,7 +94,7 @@ class OpenStackDriver(object):
                     self.keystone_client)
             except Exception as exception:
                 LOG.error('keystone_client region %s error: %s' %
-                          (region_name, exception.message))
+                          (region_name, str(exception)))
                 raise exception

         if region_clients:
@@ -116,7 +116,7 @@ class OpenStackDriver(object):
             except Exception as exception:
                 LOG.error('Region %s client %s thread %s error: %s' %
                           (region_name, client_name, thread_name,
-                           exception.message))
+                           str(exception)))
                 raise exception

     @lockutils.synchronized(LOCK_NAME)
@@ -201,7 +201,7 @@ class OpenStackDriver(object):
             OpenStackDriver._identity_tokens[region_name] = token

         except Exception as exception:
-            LOG.info('_is_token_valid handle: %s', exception.message)
+            LOG.info('_is_token_valid handle: %s', str(exception))
             # Reset the cached dictionary
             OpenStackDriver.os_clients_dict[region_name] = \
                 collections.defaultdict(dict)
@@ -61,6 +61,10 @@ class NotFound(DCCommonException):
     pass


+class Forbidden(DCCommonException):
+    message = _("Requested API is forbidden")
+
+
 class Conflict(DCCommonException):
     pass

@@ -77,6 +81,10 @@ class InternalError(DCCommonException):
     message = _("Error when performing operation")


+class ProjectNotFound(NotFound):
+    message = _("Project %(project_id)s doesn't exist")
+
+
 class OAMAddressesNotFound(NotFound):
     message = _("OAM Addresses Not Found")

@@ -67,6 +67,10 @@ class NotAuthorized(DBsyncException):
     message = _("Not authorized.")


+class Forbidden(DBsyncException):
+    message = _("Requested API is forbidden.")
+
+
 class AdminRequired(NotAuthorized):
     message = _("User does not have admin privileges: %(reason)s")

@@ -83,6 +83,10 @@ class NotAuthorized(DCManagerException):
     message = _("Not authorized.")


+class Forbidden(DCManagerException):
+    message = _("Requested API is forbidden")
+
+
 class ServiceUnavailable(DCManagerException):
     message = _("The service is unavailable")

@@ -562,7 +562,7 @@ class SubcloudManager(manager.Manager):
             db_api.subcloud_update(
                 context, subcloud.id,
                 deploy_status=consts.DEPLOY_STATE_PRE_INSTALL_FAILED)
-            LOG.error(e.message)
+            LOG.error(str(e))
             install.cleanup()
             return

@@ -576,7 +576,7 @@ class SubcloudManager(manager.Manager):
             db_api.subcloud_update(
                 context, subcloud.id,
                 deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED)
-            LOG.error(e.message)
+            LOG.error(str(e))
             install.cleanup()
             return
         install.cleanup()
@@ -335,7 +335,7 @@ class UpgradingSimplexState(BaseState):
             db_api.subcloud_update(
                 self.context, strategy_step.subcloud_id,
                 deploy_status=consts.DEPLOY_STATE_PRE_INSTALL_FAILED)
-            self.error_log(strategy_step, e.message)
+            self.error_log(strategy_step, str(e))
             # TODO(jkung): cleanup to be implemented within SubcloudInstall
             install.cleanup()
             raise
@@ -366,7 +366,7 @@ class UpgradingSimplexState(BaseState):
             db_api.subcloud_update(
                 self.context, strategy_step.subcloud_id,
                 deploy_status=consts.DEPLOY_STATE_INSTALL_FAILED)
-            self.error_log(strategy_step, e.message)
+            self.error_log(strategy_step, str(e))
             install.cleanup()
             raise

@@ -372,7 +372,7 @@ class ComputeAPIController(APIController):
                 operation_type,
                 resource_info)
         except exception.ResourceNotFound as e:
-            raise webob.exc.HTTPNotFound(explanation=e.format_message())
+            raise webob.exc.HTTPNotFound(explanation=str(e))


 class SysinvAPIController(APIController):
@@ -648,7 +648,7 @@ class SysinvAPIController(APIController):
                 operation_type,
                 json.dumps(resource_info))
         except exception.ResourceNotFound as e:
-            raise webob.exc.HTTPNotFound(explanation=e.format_message())
+            raise webob.exc.HTTPNotFound(explanation=str(e))


 class IdentityAPIController(APIController):
@@ -765,7 +765,7 @@ class IdentityAPIController(APIController):
                     operation_type,
                     json.dumps(resource_info))
             except exception.ResourceNotFound as e:
-                raise webob.exc.HTTPNotFound(explanation=e.format_message())
+                raise webob.exc.HTTPNotFound(explanation=str(e))
         else:
             LOG.warning("Empty resource id for resource: %s", operation_type)

@@ -823,7 +823,7 @@ class CinderAPIController(APIController):
                 operation_type,
                 resource_info)
         except exception.ResourceNotFound as e:
-            raise webob.exc.HTTPNotFound(explanation=e.format_message())
+            raise webob.exc.HTTPNotFound(explanation=str(e))


 class NeutronAPIController(APIController):
@@ -891,7 +891,7 @@ class NeutronAPIController(APIController):
                 operation_type,
                 resource_info)
         except exception.ResourceNotFound as e:
-            raise webob.exc.HTTPNotFound(explanation=e.format_message())
+            raise webob.exc.HTTPNotFound(explanation=str(e))


 class OrchAPIController(APIController):
@@ -73,6 +73,10 @@ class NotAuthorized(OrchestratorException):
     message = _("Not authorized.")


+class Forbidden(OrchestratorException):
+    message = _("Requested API is forbidden")
+
+
 class ServiceUnavailable(OrchestratorException):
     message = _("The service is unavailable")

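The hunks below introduce the first targeted `# pylint: disable=E1101`
comments. Attributes such as `rsrc.id` are created at runtime by
oslo.versionedobjects from the class's `fields` dict, so the access is
valid but invisible to static analysis; scoping the disable to a single
line keeps the E1101 check active everywhere else. A toy stand-in (not
the oslo implementation) showing why pylint cannot see such attributes:

    class DynamicAttrs(object):
        """Attributes listed in `fields` are attached at runtime."""

        fields = {'id': int}

        def __init__(self, **kwargs):
            # setattr with a computed name is opaque to static analysis,
            # so pylint reports E1101 on any later access to `id`.
            for name in self.fields:
                setattr(self, name, kwargs.get(name))

    rsrc = DynamicAttrs(id=42)
    print(rsrc.id)  # pylint: disable=E1101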
@@ -136,7 +136,8 @@ def enqueue_work(context, endpoint_type,
             master_id=source_resource_id)
         rsrc.create()
         LOG.info("Resource created in DB {}/{}/{}/{}".format(
-            rsrc.id, resource_type, source_resource_id, operation_type))
+            rsrc.id,  # pylint: disable=E1101
+            resource_type, source_resource_id, operation_type))
     except oslo_db_exception.DBDuplicateEntry:
         # In case of discrepancies found during audit, resource might
         # be already present in DB, but not its dependent resources.
@@ -179,7 +180,7 @@ def enqueue_work(context, endpoint_type,
         orch_req = orchrequest.OrchRequest(
             context=context, state=consts.ORCH_REQUEST_QUEUED,
             target_region_name=sc.region_name,
-            orch_job_id=orch_job.id)
+            orch_job_id=orch_job.id)  # pylint: disable=E1101
         orch_req.create()
         LOG.info("Work order created for {}:{}/{}/{}/{}".format(
             subcloud, rsrc.id, resource_type, source_resource_id, operation_type))
@@ -129,7 +129,7 @@ class NovaClient(base.DriverBase):
                 return keypair

         except Exception as exception:
-            LOG.error('Exception Occurred: %s', exception.message)
+            LOG.error('Exception Occurred: %s', str(exception))
             pass

     def create_keypairs(self, force, keypair):
@@ -143,7 +143,7 @@ class NovaClient(base.DriverBase):
                 self.nova_client.keypairs.delete(keypair)
                 LOG.info("Deleted Keypair: %s", keypair.name)
             except Exception as exception:
-                LOG.error('Exception Occurred: %s', exception.message)
+                LOG.error('Exception Occurred: %s', str(exception))
                 pass
             LOG.info("Created Keypair: %s", keypair.name)
         return self.nova_client.keypairs. \
@@ -21,9 +21,6 @@ from oslo_concurrency import lockutils
 from oslo_log import log
 from oslo_utils import timeutils

-from dcorch.common import consts
-from dcorch.common import exceptions
-
 from dccommon import consts as dccommon_consts
 from dccommon.drivers.openstack.fm import FmClient
 from dccommon.drivers.openstack.keystone_v3 import KeystoneClient
@@ -58,7 +55,7 @@ class OpenStackDriver(object):
             OpenStackDriver.os_clients_dict[region_name]['keystone'] = \
                 self.keystone_client

-        self.disabled_quotas = self._get_disabled_quotas(region_name)
+        # self.disabled_quotas = self._get_disabled_quotas(region_name)
         if region_name in OpenStackDriver.os_clients_dict and \
                 self._is_token_valid(region_name):
             LOG.info('Using cached OS client objects %s' % region_name)
@@ -79,7 +76,7 @@ class OpenStackDriver(object):
                     'sysinv'] = self.sysinv_client
             except Exception as exception:
                 LOG.error('sysinv_client region %s error: %s' %
-                          (region_name, exception.message))
+                          (region_name, str(exception)))

             try:
                 self.fm_client = FmClient(
@@ -90,7 +87,7 @@ class OpenStackDriver(object):
                     'fm'] = self.fm_client
             except Exception as exception:
                 LOG.error('fm_client region %s error: %s' %
-                          (region_name, exception.message))
+                          (region_name, str(exception)))

     @classmethod
     @lockutils.synchronized('dcorch-openstackdriver')
@@ -106,129 +103,139 @@ class OpenStackDriver(object):
         try:
             return self.keystone_client.get_enabled_projects(id_only)
         except Exception as exception:
-            LOG.error('Error Occurred: %s', exception.message)
+            LOG.error('Error Occurred: %s', str(exception))

     def get_project_by_name(self, projectname):
         try:
             return self.keystone_client.get_project_by_name(projectname)
         except Exception as exception:
-            LOG.error('Error Occurred : %s', exception.message)
+            LOG.error('Error Occurred : %s', str(exception))

     def get_project_by_id(self, projectid):
         try:
             return self.keystone_client.get_project_by_id(projectid)
         except Exception as exception:
-            LOG.error('Error Occurred : %s', exception.message)
+            LOG.error('Error Occurred : %s', str(exception))

     def get_enabled_users(self, id_only=True):
         try:
             return self.keystone_client.get_enabled_users(id_only)
         except Exception as exception:
-            LOG.error('Error Occurred : %s', exception.message)
+            LOG.error('Error Occurred : %s', str(exception))

     def get_user_by_name(self, username):
         try:
             return self.keystone_client.get_user_by_name(username)
         except Exception as exception:
-            LOG.error('Error Occurred : %s', exception.message)
+            LOG.error('Error Occurred : %s', str(exception))

     def get_user_by_id(self, userid):
         try:
             return self.keystone_client.get_user_by_id(userid)
         except Exception as exception:
-            LOG.error('Error Occurred : %s', exception.message)
+            LOG.error('Error Occurred : %s', str(exception))

     def get_resource_usages(self, project_id, user_id):
-        # If one of the resources is unavailable we still want to return
-        # any usage information we have for the others.
-        nova_usages = {}
-        neutron_usages = {}
-        cinder_usages = {}
-        try:
-            nova_usages = self.nova_client.get_resource_usages(project_id,
-                                                               user_id)
-            if user_id is None:
-                # neutron/cinder don't do per-user quotas/usage
-                neutron_usages = self.neutron_client.get_resource_usages(
-                    project_id)
-                cinder_usages = self.cinder_client.get_resource_usages(
-                    project_id)
-        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
-                exceptions.TimeOut) as ex:
-            # Delete the cached objects for that region
-            LOG.error('Error Occurred: %s', ex.message)
-            del OpenStackDriver.os_clients_dict[self.region_name]
-        except Exception as exception:
-            LOG.error('Error Occurred: %s', exception.message)
-        return nova_usages, neutron_usages, cinder_usages
+        raise(NotImplementedError)
+
+        # # If one of the resources is unavailable we still want to return
+        # # any usage information we have for the others.
+        # nova_usages = {}
+        # neutron_usages = {}
+        # cinder_usages = {}
+        # try:
+        #     nova_usages = self.nova_client.get_resource_usages(project_id,
+        #                                                        user_id)
+        #     if user_id is None:
+        #         # neutron/cinder don't do per-user quotas/usage
+        #         neutron_usages = self.neutron_client.get_resource_usages(
+        #             project_id)
+        #         cinder_usages = self.cinder_client.get_resource_usages(
+        #             project_id)
+        # except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
+        #         exceptions.TimeOut) as ex:
+        #     # Delete the cached objects for that region
+        #     LOG.error('Error Occurred: %s', ex.message)
+        #     del OpenStackDriver.os_clients_dict[self.region_name]
+        # except Exception as exception:
+        #     LOG.error('Error Occurred: %s', exception.message)
+        # return nova_usages, neutron_usages, cinder_usages

     def get_quota_limits(self, project_id, user_id):
-        # If one of the resources is unavailable we still want to return
-        # any limit information we have for the others.
-        nova_limits = {}
-        neutron_limits = {}
-        cinder_limits = {}
-        try:
-            nova_limits = self.nova_client.get_quota_limits(project_id,
-                                                            user_id)
-            if user_id is None:
-                # neutron/cinder don't do per-user quotas/usage
-                neutron_limits = self.neutron_client.get_quota_limits(
-                    project_id)
-                cinder_limits = self.cinder_client.get_quota_limits(
-                    project_id)
-        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
-                exceptions.TimeOut) as ex:
-            LOG.error('Error Occurred: %s', ex.message)
-            # Delete the cached objects for that region
-            del OpenStackDriver.os_clients_dict[self.region_name]
-        except Exception as exception:
-            LOG.error('Error Occurred: %s', exception.message)
-        return nova_limits, neutron_limits, cinder_limits
+        raise(NotImplementedError)
+
+        # # If one of the resources is unavailable we still want to return
+        # # any limit information we have for the others.
+        # nova_limits = {}
+        # neutron_limits = {}
+        # cinder_limits = {}
+        # try:
+        #     nova_limits = self.nova_client.get_quota_limits(project_id,
+        #                                                     user_id)
+        #     if user_id is None:
+        #         # neutron/cinder don't do per-user quotas/usage
+        #         neutron_limits = self.neutron_client.get_quota_limits(
+        #             project_id)
+        #         cinder_limits = self.cinder_client.get_quota_limits(
+        #             project_id)
+        # except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
+        #         exceptions.TimeOut) as ex:
+        #     LOG.error('Error Occurred: %s', ex.message)
+        #     # Delete the cached objects for that region
+        #     del OpenStackDriver.os_clients_dict[self.region_name]
+        # except Exception as exception:
+        #     LOG.error('Error Occurred: %s', exception.message)
+        # return nova_limits, neutron_limits, cinder_limits

     def write_quota_limits(self, project_id, user_id, limits_to_write):
-        try:
-            self.nova_client.update_quota_limits(project_id, user_id,
-                                                 **limits_to_write['nova'])
-            # Only nova supports per-user quotas.
-            if user_id is None:
-                self.cinder_client.update_quota_limits(
-                    project_id, **limits_to_write['cinder'])
-                self.neutron_client.update_quota_limits(
-                    project_id, limits_to_write['neutron'])
-        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
-                exceptions.TimeOut) as ex:
-            LOG.error('Error Occurred: %s', ex.message)
-            # Delete the cached objects for that region
-            del OpenStackDriver.os_clients_dict[self.region_name]
-        except Exception as exception:
-            LOG.error('Error Occurred: %s', exception.message)
+        raise(NotImplementedError)
+
+        # try:
+        #     self.nova_client.update_quota_limits(project_id, user_id,
+        #                                          **limits_to_write['nova'])
+        #     # Only nova supports per-user quotas.
+        #     if user_id is None:
+        #         self.cinder_client.update_quota_limits(
+        #             project_id, **limits_to_write['cinder'])
+        #         self.neutron_client.update_quota_limits(
+        #             project_id, limits_to_write['neutron'])
+        # except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
+        #         exceptions.TimeOut) as ex:
+        #     LOG.error('Error Occurred: %s', ex.message)
+        #     # Delete the cached objects for that region
+        #     del OpenStackDriver.os_clients_dict[self.region_name]
+        # except Exception as exception:
+        #     LOG.error('Error Occurred: %s', exception.message)

     def delete_quota_limits(self, project_id):
-        try:
-            self.nova_client.delete_quota_limits(project_id)
-            self.neutron_client.delete_quota_limits(project_id)
-            self.cinder_client.delete_quota_limits(project_id)
-        except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
-                exceptions.TimeOut):
-            # Delete the cached objects for that region
-            del OpenStackDriver.os_clients_dict[self.region_name]
-        except Exception as exception:
-            LOG.error('Error Occurred: %s', exception.message)
+        raise(NotImplementedError)
+
+        # try:
+        #     self.nova_client.delete_quota_limits(project_id)
+        #     self.neutron_client.delete_quota_limits(project_id)
+        #     self.cinder_client.delete_quota_limits(project_id)
+        # except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
+        #         exceptions.TimeOut):
+        #     # Delete the cached objects for that region
+        #     del OpenStackDriver.os_clients_dict[self.region_name]
+        # except Exception as exception:
+        #     LOG.error('Error Occurred: %s', exception.message)

     def _get_disabled_quotas(self, region):
-        disabled_quotas = []
-        if not self.keystone_client.is_service_enabled('volume') and \
-                not self.keystone_client.is_service_enabled('volumev2'):
-            disabled_quotas.extend(consts.CINDER_QUOTA_FIELDS)
-        # Neutron
-        if not self.keystone_client.is_service_enabled('network'):
-            disabled_quotas.extend(consts.NEUTRON_QUOTA_FIELDS)
-        else:
-            disabled_quotas.extend(['floating_ips', 'fixed_ips'])
-            disabled_quotas.extend(['security_groups',
-                                    'security_group_rules'])
-        return disabled_quotas
+        raise(NotImplementedError)
+
+        # disabled_quotas = []
+        # if not self.keystone_client.is_service_enabled('volume') and \
+        #         not self.keystone_client.is_service_enabled('volumev2'):
+        #     disabled_quotas.extend(consts.CINDER_QUOTA_FIELDS)
+        # # Neutron
+        # if not self.keystone_client.is_service_enabled('network'):
+        #     disabled_quotas.extend(consts.NEUTRON_QUOTA_FIELDS)
+        # else:
+        #     disabled_quotas.extend(['floating_ips', 'fixed_ips'])
+        #     disabled_quotas.extend(['security_groups',
+        #                             'security_group_rules'])
+        # return disabled_quotas

     def get_all_regions_for_project(self, project_id):
         try:
@@ -244,7 +251,7 @@ class OpenStackDriver(object):
             region_lists.remove(dccommon_consts.CLOUD_0)
             return region_lists
         except Exception as exception:
-            LOG.error('Error Occurred: %s', exception.message)
+            LOG.error('Error Occurred: %s', str(exception))
             raise

     def _get_filtered_regions(self, project_id):
@@ -269,7 +276,7 @@ class OpenStackDriver(object):
             keystone.tokens.validate(
                 OpenStackDriver._identity_tokens[region_name])
         except Exception as exception:
-            LOG.info('_is_token_valid handle: %s', exception.message)
+            LOG.info('_is_token_valid handle: %s', str(exception))
             # Reset the cached dictionary
             OpenStackDriver.os_clients_dict[region_name] = \
                 collections.defaultdict(dict)
@@ -85,7 +85,7 @@ class FernetKeyManager(manager.Manager):
             if self.gsm:
                 self.gsm.sync_request(self.context, self.endpoint_type)
         except Exception as e:
-            LOG.error(_("Exception in schedule_work: %s") % e.message)
+            LOG.error(_("Exception in schedule_work: %s") % str(e))

     @staticmethod
     def _get_master_keys():
@@ -104,7 +104,7 @@ class FernetKeyManager(manager.Manager):
                 dccommon_consts.CLOUD_0)
         except Exception as e:
             LOG.info(_("Fail to retrieve the master fernet keys: %s") %
-                     e.message)
+                     str(e))
         return keys

     def rotate_fernet_keys(self):
@@ -148,5 +148,5 @@ class FernetKeyManager(manager.Manager):
                 LOG.info(_("Update the fernet repo on %s timeout") %
                          subcloud_name)
         except Exception as e:
-            error_msg = "subcloud: {}, {}".format(subcloud_name, e.message)
+            error_msg = "subcloud: {}, {}".format(subcloud_name, str(e))
             LOG.info(_("Fail to update fernet repo %s") % error_msg)
@@ -391,7 +391,7 @@ class QuotaManager(manager.Manager):
                 sc_user = sc_os_driver.get_user_by_name(quser.name)
                 sc_user_id = getattr(sc_user, 'id', None)
             except Exception as e:
-                LOG.error("quota sync %s: %s", current_region, e.message)
+                LOG.error("quota sync %s: %s", current_region, str(e))
                 continue

             thread = threading.Thread(target=self.update_quota_limits,
@@ -441,7 +441,7 @@ class QuotaManager(manager.Manager):
                 sc_user = sc_os_driver.get_user_by_name(quser.name)
                 sc_user_id = getattr(sc_user, 'id', None)
             except Exception as e:
-                LOG.error("quota usage %s: %s", current_region, e.message)
+                LOG.error("quota usage %s: %s", current_region, str(e))
                 continue

             thread = threading.Thread(target=self.read_quota_usage,
@@ -1792,7 +1792,8 @@ class IdentitySyncThread(SyncThread):
             master_id=master_id)
         rsrc.create()
         LOG.info("Resource created in DB {}/{}/{}".format(
-            rsrc.id, resource_type, master_id))
+            rsrc.id,  # pylint: disable=E1101
+            resource_type, master_id))

         self.persist_db_subcloud_resource(rsrc.id,
                                           self.get_resource_id(
@@ -234,7 +234,7 @@ class NetworkSyncThread(SyncThread):
         try:
             rule = self.sc_neutron_client.create_security_group_rule(body)
             rule_id = rule['security_group_rule']['id']
-        except neutronclient.common.exceptions.Conflict:
+        except neutronclient_exceptions.Conflict:
             # This can happen if we try to create a rule that is already there.
             # If this happens, we'll update our mapping on the next audit.
             LOG.info("Problem creating security group rule {}, neutron says"
@@ -365,7 +365,8 @@ class NetworkSyncThread(SyncThread):
             master_id=master_id)
         rsrc.create()
         LOG.info("Resource created in DB {}/{}/{}".format(
-            rsrc.id, resource_type, master_id))
+            rsrc.id,  # pylint: disable=E1101
+            resource_type, master_id))

         self.persist_db_subcloud_resource(rsrc.id,
                                           sc_r['id'])
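The dcorch.objects hunks that follow all apply the same rename: each
class defines a `fields` dict (the versioned-object field map) whose
name collides with the imported `oslo_versionedobjects.fields` module,
and that collision is presumably what led pylint to resolve names like
`fields.IntegerField` against the dict and report E1101. The code runs
fine either way; aliasing the import as `ovo_fields` simply keeps the
module and the class attribute distinct. Roughly:

    from oslo_versionedobjects import fields as ovo_fields

    class Example(object):
        # With a plain `fields` import, pylint can resolve the name used
        # inside this dict to the class attribute being defined and then
        # report no-member on IntegerField; the alias avoids the clash.
        fields = {
            'id': ovo_fields.IntegerField(),
        }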
@@ -18,7 +18,7 @@
 from dcorch.common import exceptions
 from dcorch.db import api as db_api
 from dcorch.objects import base
-from oslo_versionedobjects import fields
+from oslo_versionedobjects import fields as ovo_fields


 @base.OrchestratorObjectRegistry.register
@@ -26,15 +26,15 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
     """DC Orchestrator orchestration job object."""

     fields = {
-        'id': fields.IntegerField(),
-        'uuid': fields.UUIDField(),
-        'user_id': fields.StringField(),
-        'project_id': fields.StringField(),
-        'endpoint_type': fields.StringField(),
-        'source_resource_id': fields.StringField(),  # resource master_id
-        'operation_type': fields.StringField(),
-        'resource_id': fields.IntegerField(),
-        'resource_info': fields.StringField(nullable=True),
+        'id': ovo_fields.IntegerField(),
+        'uuid': ovo_fields.UUIDField(),
+        'user_id': ovo_fields.StringField(),
+        'project_id': ovo_fields.StringField(),
+        'endpoint_type': ovo_fields.StringField(),
+        'source_resource_id': ovo_fields.StringField(),  # resource master_id
+        'operation_type': ovo_fields.StringField(),
+        'resource_id': ovo_fields.IntegerField(),
+        'resource_info': ovo_fields.StringField(nullable=True),
     }

     def create(self):
@@ -82,9 +82,12 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
         updates = self.obj_get_changes()
         updates.pop('id', None)
         updates.pop('uuid', None)
-        db_orch_job = db_api.orch_job_update(self._context, self.id, updates)
+        db_orch_job = db_api.orch_job_update(self._context,
+                                             self.id,  # pylint: disable=E1101
+                                             updates)
         self._from_db_object(self._context, self, db_orch_job)
         self.obj_reset_changes()

     def delete(self):
-        db_api.orch_job_delete(self._context, self.id)
+        db_api.orch_job_delete(self._context,
+                               self.id)  # pylint: disable=E1101
@@ -20,7 +20,7 @@ from dcorch.db import api as db_api
 from dcorch.objects import base
 from dcorch.objects import orchjob
 from oslo_versionedobjects import base as ovo_base
-from oslo_versionedobjects import fields
+from oslo_versionedobjects import fields as ovo_fields


 @base.OrchestratorObjectRegistry.register
@@ -28,17 +28,17 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
     """DC Orchestrator orchestration request object."""

     fields = {
-        'id': fields.IntegerField(),
-        'uuid': fields.UUIDField(),
-        'state': fields.StringField(),
-        'try_count': fields.IntegerField(),
-        'api_version': fields.StringField(nullable=True),
-        'target_region_name': fields.StringField(),
-        'orch_job_id': fields.IntegerField(),
-        'orch_job': fields.ObjectField('OrchJob'),
-        'updated_at': fields.DateTimeField(nullable=True),
-        'deleted_at': fields.DateTimeField(nullable=True),
-        'deleted': fields.IntegerField()
+        'id': ovo_fields.IntegerField(),
+        'uuid': ovo_fields.UUIDField(),
+        'state': ovo_fields.StringField(),
+        'try_count': ovo_fields.IntegerField(),
+        'api_version': ovo_fields.StringField(nullable=True),
+        'target_region_name': ovo_fields.StringField(),
+        'orch_job_id': ovo_fields.IntegerField(),
+        'orch_job': ovo_fields.ObjectField('OrchJob'),
+        'updated_at': ovo_fields.DateTimeField(nullable=True),
+        'deleted_at': ovo_fields.DateTimeField(nullable=True),
+        'deleted': ovo_fields.IntegerField()
     }

     def create(self):
@@ -103,12 +103,15 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
         updates.pop('id', None)
         updates.pop('uuid', None)
         db_orch_request = db_api.orch_request_update(
-            self._context, self.id, updates)
+            self._context,
+            self.id,  # pylint: disable=E1101
+            updates)
         self._from_db_object(self._context, self, db_orch_request)
         self.obj_reset_changes()

     def delete(self):
-        db_api.orch_request_destroy(self._context, self.id)
+        db_api.orch_request_destroy(self._context,
+                                    self.id)  # pylint: disable=E1101

     @classmethod
     def delete_previous_failed_requests(cls, context, delete_time):
@@ -122,7 +125,7 @@ class OrchRequestList(ovo_base.ObjectListBase, base.OrchestratorObject):
     VERSION = '1.1'

     fields = {
-        'objects': fields.ListOfObjectsField('OrchRequest'),
+        'objects': ovo_fields.ListOfObjectsField('OrchRequest'),
     }

     @classmethod
@@ -19,7 +19,7 @@ from dcorch.common import exceptions
 from dcorch.db import api as db_api
 from dcorch.objects import base
 from oslo_versionedobjects import base as ovo_base
-from oslo_versionedobjects import fields
+from oslo_versionedobjects import fields as ovo_fields


 @base.OrchestratorObjectRegistry.register
@@ -27,10 +27,10 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):
     """DC Orchestrator subcloud object."""

     fields = {
-        'id': fields.IntegerField(),
-        'uuid': fields.UUIDField(),
-        'resource_type': fields.StringField(),
-        'master_id': fields.StringField(),
+        'id': ovo_fields.IntegerField(),
+        'uuid': ovo_fields.UUIDField(),
+        'resource_type': ovo_fields.StringField(),
+        'master_id': ovo_fields.StringField(),
     }

     def create(self):
@@ -63,13 +63,17 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):

     def delete(self):
         db_api.resource_delete(
-            self._context, self.resource_type, self.master_id)
+            self._context,
+            self.resource_type,  # pylint: disable=E1101
+            self.master_id)  # pylint: disable=E1101

     def save(self):
         updates = self.obj_get_changes()
         updates.pop('id', None)
         updates.pop('uuid', None)
-        db_resource = db_api.resource_update(self._context, self.id, updates)
+        db_resource = db_api.resource_update(self._context,
+                                             self.id,  # pylint: disable=E1101
+                                             updates)
         self._from_db_object(self._context, self, db_resource)
         self.obj_reset_changes()

@@ -80,7 +84,7 @@ class ResourceList(ovo_base.ObjectListBase, base.OrchestratorObject):
     VERSION = '1.1'

     fields = {
-        'objects': fields.ListOfObjectsField('Resource'),
+        'objects': ovo_fields.ListOfObjectsField('Resource'),
     }

     @classmethod
@@ -17,7 +17,7 @@

 from dcorch.db import api as db_api
 from dcorch.objects import base
-from oslo_versionedobjects import fields
+from oslo_versionedobjects import fields as ovo_fields


 @base.OrchestratorObjectRegistry.register
@@ -25,16 +25,16 @@ class Service(base.OrchestratorObject, base.VersionedObjectDictCompat):
     """DC Orchestrator service object."""

     fields = {
-        'id': fields.UUIDField(),
-        'host': fields.StringField(),
-        'binary': fields.StringField(),
-        'topic': fields.StringField(),
-        'disabled': fields.BooleanField(),
-        'disabled_reason': fields.StringField(nullable=True),
-        'created_at': fields.DateTimeField(),
-        'updated_at': fields.DateTimeField(),
-        'deleted_at': fields.DateTimeField(nullable=True),
-        'deleted': fields.IntegerField(nullable=True),
+        'id': ovo_fields.UUIDField(),
+        'host': ovo_fields.StringField(),
+        'binary': ovo_fields.StringField(),
+        'topic': ovo_fields.StringField(),
+        'disabled': ovo_fields.BooleanField(),
+        'disabled_reason': ovo_fields.StringField(nullable=True),
+        'created_at': ovo_fields.DateTimeField(),
+        'updated_at': ovo_fields.DateTimeField(),
+        'deleted_at': ovo_fields.DateTimeField(nullable=True),
+        'deleted': ovo_fields.IntegerField(nullable=True),
     }

     @classmethod
@@ -23,7 +23,7 @@ from dcorch.common import exceptions
 from dcorch.db import api as db_api
 from dcorch.objects import base
 from oslo_versionedobjects import base as ovo_base
-from oslo_versionedobjects import fields
+from oslo_versionedobjects import fields as ovo_fields

 LOG = logging.getLogger(__name__)

@@ -33,14 +33,14 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
     """DC Orchestrator subcloud object."""

     fields = {
-        'id': fields.IntegerField(),
-        'uuid': fields.UUIDField(),
-        'region_name': fields.StringField(),
-        'software_version': fields.StringField(),
-        'management_state': fields.StringField(nullable=True),
-        'availability_status': fields.StringField(),
-        'capabilities': fields.DictOfListOfStringsField(),
-        'initial_sync_state': fields.StringField(),
+        'id': ovo_fields.IntegerField(),
+        'uuid': ovo_fields.UUIDField(),
+        'region_name': ovo_fields.StringField(),
+        'software_version': ovo_fields.StringField(),
+        'management_state': ovo_fields.StringField(nullable=True),
+        'availability_status': ovo_fields.StringField(),
+        'capabilities': ovo_fields.DictOfListOfStringsField(),
+        'initial_sync_state': ovo_fields.StringField(),
     }

     def create(self):
@@ -60,12 +60,9 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
                 self._context, region_name, updates)
             return self._from_db_object(self._context, self, db_subcloud)
         except Exception as e:
-            LOG.error("Failed to create subcloud %s: %s" % (self.region_name, e))
-            try:
-                db_api.subcloud_alarms_delete(self._context, self.region_name)
-            except Exception as e:
-                LOG.error("Failed to delete alarm entry for %s: %s"
-                          % (self.region_name, e))
+            LOG.error("Failed to create subcloud %s: %s" % (
+                self.region_name,  # pylint: disable=E1101
+                str(e)))
             raise e

     @classmethod
@@ -77,8 +74,10 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
         updates = self.obj_get_changes()
         updates.pop('id', None)
         updates.pop('uuid', None)
-        db_subcloud = db_api.subcloud_update(self._context, self.region_name,
-                                             updates)
+        db_subcloud = db_api.subcloud_update(
+            self._context,
+            self.region_name,  # pylint: disable=E1101
+            updates)
         self._from_db_object(self._context, self, db_subcloud)
         self.obj_reset_changes()

@@ -86,15 +85,20 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
         # TODO(cfriesen): fix up to use delete cascade
         # delete the associated sync requests
         try:
-            db_api.orch_request_delete_by_subcloud(self._context, self.region_name)
+            db_api.orch_request_delete_by_subcloud(
+                self._context,
+                self.region_name)  # pylint: disable=E1101
         except Exception as e:
             LOG.error("Failed to delete orchestration request for %s: %s"
-                      % (self.region_name, e))
+                      % (self.region_name,  # pylint: disable=E1101
+                         str(e)))
         try:
-            db_api.subcloud_delete(self._context, self.region_name)
+            db_api.subcloud_delete(self._context,
+                                   self.region_name)  # pylint: disable=E1101
         except Exception as e:
             LOG.error("Failed to delete subcloud entry for %s: %s"
-                      % (self.region_name, e))
+                      % (self.region_name,  # pylint: disable=E1101
+                         str(e)))


 @base.OrchestratorObjectRegistry.register
@@ -103,7 +107,7 @@ class SubcloudList(ovo_base.ObjectListBase, base.OrchestratorObject):
     VERSION = '1.1'

     fields = {
-        'objects': fields.ListOfObjectsField('Subcloud'),
+        'objects': ovo_fields.ListOfObjectsField('Subcloud'),
     }

     @classmethod
@@ -20,7 +20,7 @@ from dcorch.common import exceptions
 from dcorch.db import api as db_api
 from dcorch.objects import base
 from oslo_versionedobjects import base as ovo_base
-from oslo_versionedobjects import fields
+from oslo_versionedobjects import fields as ovo_fields


 @base.OrchestratorObjectRegistry.register
@@ -29,12 +29,12 @@ class SubcloudResource(base.OrchestratorObject,
     """DC Orchestrator subcloud object."""

     fields = {
-        'id': fields.IntegerField(),
-        'uuid': fields.UUIDField(),
-        'shared_config_state': fields.StringField(),
-        'subcloud_resource_id': fields.StringField(),
-        'resource_id': fields.IntegerField(),
-        'subcloud_id': fields.IntegerField(),
+        'id': ovo_fields.IntegerField(),
+        'uuid': ovo_fields.UUIDField(),
+        'shared_config_state': ovo_fields.StringField(),
+        'subcloud_resource_id': ovo_fields.StringField(),
+        'resource_id': ovo_fields.IntegerField(),
+        'subcloud_id': ovo_fields.IntegerField(),
     }

     def create(self):
@@ -56,7 +56,7 @@ class SubcloudResource(base.OrchestratorObject,
         return self._from_db_object(self._context, self, db_subcloud_resource)

     def is_managed(self):
-        return self.shared_config_state == consts.SHARED_CONFIG_STATE_MANAGED
+        return self.shared_config_state == consts.SHARED_CONFIG_STATE_MANAGED  # pylint: disable=E1101

     @classmethod
     def get_by_id(cls, context, id):
@@ -76,13 +76,16 @@ class SubcloudResource(base.OrchestratorObject,
         updates.pop('uuid', None)
         updates.pop('resource', None)
         updates.pop('subcloud', None)
-        db_subcloud = db_api.subcloud_resource_update(self._context,
-                                                      self.id, updates)
+        db_subcloud = db_api.subcloud_resource_update(
+            self._context,
+            self.id,  # pylint: disable=E1101
+            updates)
         self._from_db_object(self._context, self, db_subcloud)
         self.obj_reset_changes()

     def delete(self):
-        db_api.subcloud_resource_delete(self._context, self.id)
+        db_api.subcloud_resource_delete(self._context,
+                                        self.id)  # pylint: disable=E1101


 @base.OrchestratorObjectRegistry.register
@@ -91,7 +94,7 @@ class SubcloudResourceList(ovo_base.ObjectListBase, base.OrchestratorObject):
     VERSION = '1.1'

     fields = {
-        'objects': fields.ListOfObjectsField('SubcloudResource'),
+        'objects': ovo_fields.ListOfObjectsField('SubcloudResource'),
     }

     @classmethod
@@ -61,7 +61,6 @@ load-plugins=
 # W1201: logging-not-lazy
 # W1401: anomalous-backslash-in-string
 # E detect Errors for important programming issues (i.e. most probably bug)
-# E1101: no-member
 # E1102: not-callable
 # E1120: no-value-for-parameter (sqlalchemy)
 # E1128: assignment-from-none
@@ -69,7 +68,7 @@ disable=C,R,fixme,
        W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221,W0223,W0231,W0235,
        W0311,W0402,W0403,W0603,W0612,W0613,W0621,W0622,W0631,W0703,W0706,
        W1113,W1201,W1401,
-       E1101,E1102,E1120,E1128
+       E1102,E1120,E1128


 [REPORTS]