Provide new parameter to disable health check
Provide a new parameter in the cluster API initialize function to disable the health check and the endpoint accessibility check. By default the value is True. In some scenarios, for example the NCP election process, users creating an nsxlib object do not intend to validate the endpoint state.

Change-Id: I6485a91f1d764fbb7ae3edc61541b7cd9f97682e
parent 10366f00ba
commit ce1d1e2424
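For context, a minimal sketch of how a consumer (for example NCP during leader election) might opt out of the startup health check once this change is available. The manager addresses and credentials below are placeholders, and the snippet assumes the library's usual v3.NsxLib entry point; only enable_health_check is what this commit introduces.

    from vmware_nsxlib import v3
    from vmware_nsxlib.v3 import config

    # Placeholder managers/credentials; only enable_health_check is new here.
    nsxlib_config = config.NsxLibConfig(
        nsx_api_managers=['192.168.10.10', '192.168.10.11'],
        username='admin',
        password='password',
        # Skip endpoint validation and the keepalive loop; the cluster
        # reports GREEN immediately without contacting the backends.
        enable_health_check=False)

    nsxlib = v3.NsxLib(nsxlib_config)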
@@ -396,7 +396,8 @@ class NsxClientTestCase(NsxLibTestCase):
         return client

     def new_mocked_cluster(self, conf_managers, validate_conn_func,
-                           concurrent_connections=None, exceptions=None):
+                           concurrent_connections=None, exceptions=None,
+                           enable_health_check=True):
         mock_provider = mock.Mock()
         mock_provider.default_scheme = 'https'
         mock_provider.validate_connection = validate_conn_func
@@ -406,6 +407,7 @@ class NsxClientTestCase(NsxLibTestCase):
         nsxlib_config.exception_config = exceptions
         if concurrent_connections:
             nsxlib_config.concurrent_connections = concurrent_connections
+        nsxlib_config.enable_health_check = enable_health_check
         nsxlib_config.http_provider = mock_provider
         nsxlib_config.nsx_api_managers = conf_managers
@@ -553,3 +553,11 @@ class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):
         api = self.mock_nsx_clustered_api()
         # just make sure this api is defined, and does not crash
         api._reinit_cluster()
+
+    def test_initialize_cluster_without_health_check(self):
+        conf_managers = ['8.9.10.11', '9.10.11.12']
+        validate_fn = mock.MagicMock()
+        api = self.new_mocked_cluster(conf_managers, validate_fn,
+                                      enable_health_check=False)
+        self.assertEqual(cluster.ClusterHealth.GREEN, api.health)
+        validate_fn.assert_not_called()
@@ -451,18 +451,21 @@ class ClusteredAPI(object):
                  keepalive_interval=33,
                  api_rate_limit=None,
                  api_rate_mode=None,
-                 api_log_mode=None):
+                 api_log_mode=None,
+                 enable_health_check=True):

         self._http_provider = http_provider
         self._keepalive_interval = keepalive_interval
         self._print_keepalive = 0
         self._silent = False
         self._api_call_collectors = []
+        self._enable_health_check = enable_health_check

         def _init_cluster(*args, **kwargs):
             self._init_endpoints(providers, min_conns_per_pool,
                                  max_conns_per_pool, api_rate_limit,
-                                 api_rate_mode, api_log_mode)
+                                 api_rate_mode, api_log_mode,
+                                 enable_health_check)

         _init_cluster()
@@ -476,7 +479,7 @@ class ClusteredAPI(object):

     def _init_endpoints(self, providers, min_conns_per_pool,
                         max_conns_per_pool, api_rate_limit, api_rate_mode,
-                        api_log_mode):
+                        api_log_mode, enable_health_check=True):
         LOG.debug("Initializing API endpoints")

         def _create_conn(p):
@@ -516,30 +519,35 @@ class ClusteredAPI(object):
         # duck type to proxy http invocations
         for method in ClusteredAPI._HTTP_VERBS:
             setattr(self, method, self._proxy_stub(method))

-        conns = greenpool.GreenPool()
-        for endpoint in self._endpoints.values():
-            conns.spawn(self._validate, endpoint)
-        eventlet.sleep(0)
-        while conns.running():
-            if (self.health == ClusterHealth.GREEN or
-                    self.health == ClusterHealth.ORANGE):
-                # only wait for 1 or more endpoints to reduce init time
-                break
-            eventlet.sleep(0.5)
-
-        if len(self._endpoints) > 1:
-            # We don't monitor connectivity when one endpoint is available,
-            # since there is no alternative to querying this single backend
-            # If endpoint was down, we can tolerate extra roundtrip to
-            # validate connectivity
-            for endpoint in self._endpoints.values():
-                # dynamic loop for each endpoint to ensure connectivity
-                loop = loopingcall.DynamicLoopingCall(
-                    self._endpoint_keepalive, endpoint)
-                loop.start(initial_delay=self._keepalive_interval,
-                           periodic_interval_max=self._keepalive_interval,
-                           stop_on_exception=False)
+        # If health check is disabled, skip the endpoint accessibility check
+        # and the health check loop. Set API health to GREEN.
+        if enable_health_check:
+            conns = greenpool.GreenPool()
+            for endpoint in self._endpoints.values():
+                conns.spawn(self._validate, endpoint)
+            eventlet.sleep(0)
+            while conns.running():
+                if (self.health == ClusterHealth.GREEN or
+                        self.health == ClusterHealth.ORANGE):
+                    # only wait for 1 or more endpoints to reduce init time
+                    break
+                eventlet.sleep(0.5)
+
+            if len(self._endpoints) > 1:
+                # We don't monitor connectivity when one endpoint is available,
+                # since there is no alternative to querying this single backend
+                # If endpoint was down, we can tolerate extra roundtrip to
+                # validate connectivity
+                for endpoint in self._endpoints.values():
+                    # dynamic loop for each endpoint to ensure connectivity
+                    loop = loopingcall.DynamicLoopingCall(
+                        self._endpoint_keepalive, endpoint)
+                    loop.start(initial_delay=self._keepalive_interval,
+                               periodic_interval_max=self._keepalive_interval,
+                               stop_on_exception=False)
+        else:
+            for endpoint in self._endpoints.values():
+                endpoint.set_state(EndpointState.UP)

         LOG.debug("Done initializing API endpoint(s). "
                   "API cluster health: %s", self.health)
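With the flag off, every endpoint is forced to EndpointState.UP, which is why the new test can assert a GREEN cluster without any endpoint validation. Below is a simplified, self-contained stand-in for that health roll-up; it is not the library's real classes (whose states and properties carry more detail), only an illustration of the GREEN/ORANGE/RED relationship the diff relies on.

    import enum


    class EndpointState(enum.Enum):
        INITIALIZED = 'INITIALIZED'
        DOWN = 'DOWN'
        UP = 'UP'


    class ClusterHealth(enum.Enum):
        RED = 'RED'        # no endpoint is UP
        ORANGE = 'ORANGE'  # some, but not all, endpoints are UP
        GREEN = 'GREEN'    # every endpoint is UP


    def cluster_health(states):
        """Roll individual endpoint states up into one cluster health value."""
        up = sum(1 for s in states if s is EndpointState.UP)
        if up == 0:
            return ClusterHealth.RED
        return ClusterHealth.GREEN if up == len(states) else ClusterHealth.ORANGE


    # enable_health_check=False marks every endpoint UP without contacting it,
    # so the cluster reports GREEN immediately.
    assert cluster_health([EndpointState.UP, EndpointState.UP]) == ClusterHealth.GREEN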
@@ -831,7 +839,8 @@ class NSXClusteredAPI(ClusteredAPI):
             keepalive_interval=self.nsxlib_config.conn_idle_timeout,
             api_rate_limit=self.nsxlib_config.api_rate_limit_per_endpoint,
             api_rate_mode=self.nsxlib_config.api_rate_mode,
-            api_log_mode=self.nsxlib_config.api_log_mode)
+            api_log_mode=self.nsxlib_config.api_log_mode,
+            enable_health_check=self.nsxlib_config.enable_health_check)

         LOG.debug("Created NSX clustered API with '%s' "
                   "provider", self._http_provider.provider_id)
@@ -158,6 +158,14 @@ class NsxLibConfig(object):
                          When set to API_LOG_PER_ENDPOINT, API calls sent to
                          each endpoint will be collected individually.
                          By default, this option is disabled as set to None.
+    :param enable_health_check: Option to enable or disable the health check
+                                for all endpoints when initializing the
+                                cluster API. The check includes endpoint
+                                connection validation and the health check
+                                loop. In some scenarios, e.g. the NCP election
+                                process, there is no need to validate the
+                                endpoints' accessibility.
+                                By default, this option is set to True.

     -- Additional parameters which are relevant only for the Policy manager:
     :param allow_passthrough: If True, use nsx manager api for cases which are
@@ -198,7 +206,8 @@ class NsxLibConfig(object):
                  api_rate_limit_per_endpoint=None,
                  api_rate_mode=None,
                  exception_config=None,
-                 api_log_mode=None):
+                 api_log_mode=None,
+                 enable_health_check=True):

         self.nsx_api_managers = nsx_api_managers
         self._username = username
@@ -230,6 +239,7 @@ class NsxLibConfig(object):
         self.api_rate_mode = api_rate_mode
         self.exception_config = exception_config or ExceptionConfig()
         self.api_log_mode = api_log_mode
+        self.enable_health_check = enable_health_check

         if len(nsx_api_managers) == 1 and not self.cluster_unavailable_retry:
             LOG.warning("When only one endpoint is provided, keepalive probes"
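Taken together, the change threads a single boolean from the public configuration object down to endpoint initialization. A rough trace of the path this commit touches, with argument lists abbreviated:

    # NsxLibConfig(enable_health_check=False)
    #   -> NSXClusteredAPI(..., enable_health_check=nsxlib_config.enable_health_check)
    #      -> ClusteredAPI.__init__(..., enable_health_check=False)
    #         -> ClusteredAPI._init_endpoints(..., enable_health_check=False)
    #            -> no _validate() spawn, no DynamicLoopingCall keepalive loop;
    #               every endpoint is set straight to EndpointState.UP.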