Improve exception handling for expired certificates
To avoid dcorch attempting to sync expired certificates, this change adds a certificate validity check that raises a CertificateExpiredException. That exception is handled so that the subcloud status remains out-of-sync while the expired certificate is present on the system controller. Uninstalling the expired certificate from the system controller returns the subclouds to in-sync status on the next dcorch sync run.

Closes-Bug: 1939155
Change-Id: I27c40ef34c76f5d673429ea2384f76f25241322b
Signed-off-by: Rei Oliveira <Reinildes.JoseMateusOliveira@windriver.com>
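A minimal standalone sketch of the validity check this change introduces, using the same oslo_utils.timeutils calls as the create_certificate() hunk further below; the sample payload values here are hypothetical:

    from oslo_utils import timeutils

    # Hypothetical certificate payload; the field names mirror the 'payload'
    # dict handled in create_certificate() below, but the values are made up.
    payload = {'signature': 'ssl_ca_123456',
               'expiry_date': '2021-07-01T00:00:00+00:00'}

    # parse_isotime() returns an aware datetime; normalize_time() converts it
    # to naive UTC so it can be compared against timeutils.utcnow().
    expiry_datetime = timeutils.normalize_time(
        timeutils.parse_isotime(payload['expiry_date']))

    if timeutils.utcnow() > expiry_datetime:
        # This is the condition under which the patch raises
        # CertificateExpiredException instead of syncing the certificate.
        print("certificate expired at %s" % expiry_datetime)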
@@ -175,6 +175,11 @@ class SyncRequestFailed(OrchestratorException):
     message = _("The sync operation failed")
 
 
+class SyncRequestAbortedBySystem(OrchestratorException):
+    message = _("The sync operation was aborted by the system because"
+                " some condition was not met")
+
+
 class SyncRequestFailedRetry(OrchestratorException):
     message = _("The sync operation failed, will retry")
 
@@ -221,3 +226,7 @@ class OrchRequestAlreadyExists(Conflict):
 
 class ObjectActionError(OrchestratorException):
     msg_fmt = _('Object action %(action)s failed because: %(reason)s')
+
+
+class CertificateExpiredException(OrchestratorException):
+    message = _("Certificate is expired and will not be synced")
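For context, a small sketch of how the new exception class is meant to behave, assuming the OrchestratorException base follows the usual oslo-style pattern of using the class-level message as the default exception string (the base class is not shown in this diff):

    from dcorch.common import exceptions

    try:
        # Raised by the sysinv sync code below when a certificate has expired.
        raise exceptions.CertificateExpiredException()
    except exceptions.CertificateExpiredException as e:
        # Expected to read "Certificate is expired and will not be synced",
        # assuming the base class formats `message` like other dcorch exceptions.
        print(str(e))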
@@ -20,6 +20,7 @@ from requests_toolbelt import MultipartDecoder
 
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
+from oslo_utils import timeutils
 
 from dccommon import consts as dccommon_consts
 from dccommon.drivers.openstack import sdk_platform as sdk
@@ -97,6 +98,11 @@ class SysinvSyncThread(SyncThread):
                       .format(request.orch_job.operation_type,
                               rsrc.resource_type))
             raise exceptions.SyncRequestFailed
+        except exceptions.CertificateExpiredException as e:
+            LOG.info("{} {} aborted: {}".format(
+                request.orch_job.operation_type, rsrc.resource_type,
+                str(e)), extra=self.log_extra)
+            raise exceptions.SyncRequestAbortedBySystem
         except (exceptions.ConnectionRefused, exceptions.TimeOut,
                 keystone_exceptions.connection.ConnectTimeout,
                 keystone_exceptions.ConnectFailure) as e:
@@ -210,7 +216,15 @@ class SysinvSyncThread(SyncThread):
         certificate_dict = jsonutils.loads(request.orch_job.resource_info)
         payload = certificate_dict.get('payload')
 
-        if not payload:
+        if payload and 'expiry_date' in payload:
+            expiry_datetime = timeutils.normalize_time(
+                timeutils.parse_isotime(payload['expiry_date']))
+
+            if timeutils.utcnow() > expiry_datetime:
+                LOG.info("create_certificate Certificate %s has expired at %s"
+                         % (payload['signature'], str(expiry_datetime)))
+                raise exceptions.CertificateExpiredException
+        else:
             LOG.info("create_certificate No payload found in resource_info"
                      "{}".format(request.orch_job.resource_info),
                      extra=self.log_extra)
@@ -304,6 +318,9 @@ class SysinvSyncThread(SyncThread):
                      .format(self.region_name,
                              str(e)), extra=self.log_extra)
             raise exceptions.SyncRequestTimeout
+        except exceptions.CertificateExpiredException as e:
+            LOG.exception(e)
+            raise exceptions.CertificateExpiredException
         except Exception as e:
             LOG.exception(e)
             raise exceptions.SyncRequestFailedRetry
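Together, the SysinvSyncThread hunks above form a small translation chain: the resource-level handler logs and re-raises CertificateExpiredException unchanged, and the handler in the sync dispatch path converts it into SyncRequestAbortedBySystem for the engine loop. A condensed sketch of that chain; the stand-in function names are hypothetical and only the exception names come from the diff:

    from dcorch.common import exceptions

    def do_create_certificate():
        # Stand-in for the expiry check added to create_certificate() above.
        raise exceptions.CertificateExpiredException()

    def sync_certificate_resource():
        try:
            do_create_certificate()
        except exceptions.CertificateExpiredException:
            # Resource-level handler above: logged, then re-raised unchanged.
            raise

    def sync_resource(request, rsrc):
        try:
            sync_certificate_resource()
        except exceptions.CertificateExpiredException:
            # Dispatch-level handler above: translated so the engine marks the
            # orch request failed and flags the run as aborted.
            raise exceptions.SyncRequestAbortedBySystem()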
@@ -295,7 +295,6 @@ class SyncThread(object):
         LOG.info("{}: starting sync routine".format(self.subcloud_name),
                  extra=self.log_extra)
         region_name = self.subcloud_name
-
         sync_requests = []
         # We want to check for pending work even if subcloud is disabled.
 
@@ -339,6 +338,8 @@ class SyncThread(object):
             else:
                 # Subcloud is enabled and there are pending sync requests, so
                 # we have work to do.
+
+                request_aborted = False
                 try:
                     for request in actual_sync_requests:
                         if not self.is_subcloud_enabled() or \
@@ -398,6 +399,12 @@ class SyncThread(object):
                                 consts.ORCH_REQUEST_STATE_FAILED
                             request.save()
                             retry_count = self.MAX_RETRY
+                        except exceptions.SyncRequestAbortedBySystem:
+                            request.state = \
+                                consts.ORCH_REQUEST_STATE_FAILED
+                            request.save()
+                            retry_count = self.MAX_RETRY
+                            request_aborted = True
 
                 # If we fall out of the retry loop we either succeeded
                 # or failed multiple times and want to move to the next
@@ -415,13 +422,22 @@ class SyncThread(object):
             target_region_name=region_name,
             states=states)
 
-        if sync_requests and sync_status_start != dcm_consts.SYNC_STATUS_OUT_OF_SYNC:
+        if (sync_requests and
+                sync_status_start != dcm_consts.SYNC_STATUS_OUT_OF_SYNC):
             self.set_sync_status(dcm_consts.SYNC_STATUS_OUT_OF_SYNC)
-            LOG.info("End of resource sync out-of-sync" + str(len(sync_requests)) + " sync request(s)",
+            LOG.info("End of resource sync out-of-sync. " +
+                     str(len(sync_requests)) + " sync request(s)",
                      extra=self.log_extra)
+        elif sync_requests and request_aborted:
+            if sync_status_start != dcm_consts.SYNC_STATUS_OUT_OF_SYNC:
+                self.set_sync_status(dcm_consts.SYNC_STATUS_OUT_OF_SYNC)
+            LOG.info("End of resource sync out-of-sync. " +
+                     str(len(sync_requests)) + " sync request(s)" +
+                     ": request_aborted", extra=self.log_extra)
         elif sync_status_start != dcm_consts.SYNC_STATUS_IN_SYNC:
             self.set_sync_status(dcm_consts.SYNC_STATUS_IN_SYNC)
-            LOG.info("End of resource sync in-sync" + str(len(sync_requests)) + " sync request(s)",
+            LOG.info("End of resource sync in-sync. " +
+                     str(len(sync_requests)) + " sync request(s)",
                      extra=self.log_extra)
 
         LOG.info("Sync resources done for subcloud", extra=self.log_extra)
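The end-of-run branches above implement the behaviour described in the commit message: the subcloud is reported out-of-sync while sync requests remain or while a request was aborted because of an expired certificate, and goes back in-sync once those requests clear. A simplified restatement of that intent (hypothetical helper, not a line-for-line equivalent of the branches above):

    def intended_sync_status(pending_sync_requests, request_aborted):
        # Illustrative only: an expired certificate keeps the subcloud
        # out-of-sync; once it is uninstalled from the system controller and
        # the pending requests clear, the next run reports in-sync.
        if pending_sync_requests or request_aborted:
            return 'out-of-sync'
        return 'in-sync'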