Merge "Update the sysinv Kubernetes upgrade mechanism"

Zuul authored 2021-08-25 15:00:07 +00:00; committed by Gerrit Code Review
commit aa425a4c88
7 changed files with 164 additions and 23 deletions


@@ -296,6 +296,14 @@ class KubeUpgradeController(rest.RestController):
service_affecting=False)
fm_api.FaultAPIs().set_fault(fault)
# Set the new kubeadm version in the DB.
# This will not actually change the bind mounts until we apply a
# puppet manifest that makes use of it.
kube_cmd_versions = objects.kube_cmd_version.get(
pecan.request.context)
kube_cmd_versions.kubeadm_version = to_version.lstrip('v')
kube_cmd_versions.save()
LOG.info("Started kubernetes upgrade from version: %s to version: %s"
% (current_kube_version, to_version))
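The comment above captures the design point: starting an upgrade only records the new kubeadm version in the database, and nothing on the host changes until a puppet manifest consumes that value. A minimal standalone sketch of the flow, with a plain dict standing in for the kube_cmd_version record (only the hieradata key names below are taken from this change):

# Toy model of the deferred-effect pattern: the API records the version,
# and the puppet plugin later exposes it as hieradata for a manifest apply.
kube_cmd_version = {'kubeadm_version': '1.18.1', 'kubelet_version': '1.18.1'}

def start_upgrade(to_version):
    # API side: persist the new kubeadm version; no bind mounts change yet.
    kube_cmd_version['kubeadm_version'] = to_version.lstrip('v')

def kubeadm_kubelet_hieradata():
    # Puppet-plugin side: the stored versions only take effect once a
    # manifest that reads these keys is applied to the host.
    return {
        'platform::kubernetes::params::kubeadm_version':
            kube_cmd_version['kubeadm_version'],
        'platform::kubernetes::params::kubelet_version':
            kube_cmd_version['kubelet_version'],
    }

start_upgrade('v1.19.13')
assert kubeadm_kubelet_hieradata()[
    'platform::kubernetes::params::kubeadm_version'] == '1.19.13'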
@@ -397,6 +405,12 @@ class KubeUpgradeController(rest.RestController):
raise wsme.exc.ClientSideError(_(
"Kubernetes to_version must be active to complete"))
# Set the new kubelet version in the DB.
kube_cmd_versions = objects.kube_cmd_version.get(
pecan.request.context)
kube_cmd_versions.kubelet_version = kube_upgrade_obj.to_version.lstrip('v')
kube_cmd_versions.save()
# All is well, mark the upgrade as complete
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_COMPLETE
kube_upgrade_obj.save()


@ -53,6 +53,7 @@ KUBERNETES_ADMIN_USER = "kubernetes-admin"
# Possible states for each supported kubernetes version
KUBE_STATE_AVAILABLE = 'available'
KUBE_STATE_UNAVAILABLE = 'unavailable'
KUBE_STATE_ACTIVE = 'active'
KUBE_STATE_PARTIAL = 'partial'
@@ -130,6 +131,24 @@ def get_kube_versions():
'applied_patches': [],
'available_patches': [],
},
{'version': 'v1.19.13',
'upgrade_from': ['v1.18.1'],
'downgrade_to': [],
'applied_patches': [],
'available_patches': [],
},
{'version': 'v1.20.9',
'upgrade_from': ['v1.19.13'],
'downgrade_to': [],
'applied_patches': [],
'available_patches': [],
},
{'version': 'v1.21.3',
'upgrade_from': ['v1.20.9'],
'downgrade_to': [],
'applied_patches': [],
'available_patches': [],
},
]
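The three new entries extend the supported chain to v1.18.1 -> v1.19.13 -> v1.20.9 -> v1.21.3, each version upgradable only from its immediate predecessor. A small helper, not part of the change, showing how the 'upgrade_from' metadata can be walked to list that chain (the v1.18.1 entry below is a placeholder for the pre-existing entry, which is not shown in this hunk):

SUPPORTED_VERSIONS = [
    {'version': 'v1.18.1', 'upgrade_from': []},  # placeholder for the existing entry
    {'version': 'v1.19.13', 'upgrade_from': ['v1.18.1']},
    {'version': 'v1.20.9', 'upgrade_from': ['v1.19.13']},
    {'version': 'v1.21.3', 'upgrade_from': ['v1.20.9']},
]

def upgrade_chain(start, versions=SUPPORTED_VERSIONS):
    """Follow the one-step 'upgrade_from' links starting at 'start'."""
    chain = [start]
    while True:
        successors = [v['version'] for v in versions
                      if chain[-1] in v['upgrade_from']]
        if not successors:
            return chain
        chain.append(successors[0])  # each version has a single successor here

assert upgrade_chain('v1.18.1') == ['v1.18.1', 'v1.19.13', 'v1.20.9', 'v1.21.3']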
@@ -748,7 +767,8 @@ class KubeOperator(object):
# Set counts to 0
version_counts = dict()
for version in get_kube_versions():
kube_versions = get_kube_versions()
for version in kube_versions:
version_counts[version['version']] = 0
# Count versions running on control plane
@@ -778,11 +798,17 @@ class KubeOperator(object):
active_candidates.append(version)
else:
# This version is not running anywhere
version_states[version] = KUBE_STATE_AVAILABLE
version_states[version] = KUBE_STATE_UNAVAILABLE
# If only a single version is running, then mark it as active
if len(active_candidates) == 1:
version_states[active_candidates[0]] = KUBE_STATE_ACTIVE
active_version = active_candidates[0]
version_states[active_version] = KUBE_STATE_ACTIVE
# Mark the versions that can upgrade_from the active one as available
for version in kube_versions:
if active_version in version['upgrade_from']:
version_states[version['version']] = KUBE_STATE_AVAILABLE
return version_states
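The behavioural change in this hunk: a version that is not running anywhere now defaults to 'unavailable', and only the direct upgrade targets of the single active version are reported as 'available'. A simplified standalone sketch of that final step; it assumes the partial/active-candidate bookkeeping has already happened, which is only partly visible in the hunk:

KUBE_STATE_ACTIVE = 'active'
KUBE_STATE_AVAILABLE = 'available'

def finalize_states(kube_versions, version_states, active_candidates):
    """kube_versions: metadata dicts with 'version' and 'upgrade_from' keys;
    version_states: states computed so far ('partial' or 'unavailable');
    active_candidates: versions found running on every component."""
    if len(active_candidates) == 1:
        active_version = active_candidates[0]
        version_states[active_version] = KUBE_STATE_ACTIVE
        # Only versions that can upgrade_from the active one are offered.
        for version in kube_versions:
            if active_version in version['upgrade_from']:
                version_states[version['version']] = KUBE_STATE_AVAILABLE
    return version_states

states = finalize_states(
    [{'version': 'v1.18.1', 'upgrade_from': []},
     {'version': 'v1.19.13', 'upgrade_from': ['v1.18.1']},
     {'version': 'v1.20.9', 'upgrade_from': ['v1.19.13']}],
    {'v1.18.1': 'partial', 'v1.19.13': 'unavailable', 'v1.20.9': 'unavailable'},
    ['v1.18.1'])
assert states == {'v1.18.1': 'active',
                  'v1.19.13': 'available',
                  'v1.20.9': 'unavailable'}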


@@ -89,7 +89,7 @@ class KubernetesPuppet(base.BasePuppet):
config.update(self._get_host_k8s_certificates_config(host))
# Get the kubernetes version for this host
config.update(self._get_kubeadm_kubelet_version())
config.update(self._get_kubeadm_kubelet_version(host))
return config
@@ -353,18 +353,40 @@ class KubernetesPuppet(base.BasePuppet):
return config
def _get_kubeadm_kubelet_version(self):
def _get_kubeadm_kubelet_version(self, host):
config = {}
kubeadm_version = None
kubelet_version = None
kube_upgrade_state = None
# Grab the upgrade state if any.
try:
kube_upgrade_obj = objects.kube_upgrade.get_one(
self.context)
kube_upgrade_state = kube_upgrade_obj.state
except exception.NotFound:
pass
try:
kube_version = self.dbapi.kube_cmd_version_get()
# kubeadm version is system-wide
kubeadm_version = kube_version.kubeadm_version
# default kubelet version is system-wide
kubelet_version = kube_version.kubelet_version
# If there's a k8s upgrade in progress, the kubelet version
# is determined by the host's kubelet upgrade status.
if kube_upgrade_state:
kube_host_upgrade = objects.kube_host_upgrade.get_by_host_id(
self.context, host.id)
if kube_host_upgrade.status == kubernetes.KUBE_HOST_UPGRADING_KUBELET:
kubelet_version = kube_host_upgrade.target_version.lstrip('v')
config.update({'platform::kubernetes::params::kubeadm_version': kubeadm_version})
config.update({'platform::kubernetes::params::kubelet_version': kubelet_version})
except Exception:
LOG.exception("Exception getting kubeadm kubelet version")
raise exception.KubeVersionUnavailable()
return config
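The net effect of the new per-host logic, reduced to a standalone sketch. The status strings here are stand-ins for the sysinv constants (their exact values are not shown in this diff); everything else mirrors the code above:

KUBE_HOST_UPGRADING_KUBELET = 'upgrading-kubelet'  # stand-in value, for illustration

def pick_host_versions(system_kubeadm, system_kubelet,
                       upgrade_in_progress, host_status, host_target_version):
    """Return (kubeadm_version, kubelet_version) to publish for one host."""
    kubeadm_version = system_kubeadm    # kubeadm version is always system-wide
    kubelet_version = system_kubelet    # default kubelet version is system-wide
    if upgrade_in_progress and host_status == KUBE_HOST_UPGRADING_KUBELET:
        # Only a host that has reached its kubelet step gets the new version.
        kubelet_version = host_target_version.lstrip('v')
    return kubeadm_version, kubelet_version

# During an upgrade to v1.19.13, a host that has not yet reached its kubelet
# step keeps the old kubelet while already publishing the new kubeadm version:
assert pick_host_versions('1.19.13', '1.18.1', True,
                          'upgrading-control-plane',  # illustrative status
                          'v1.19.13') == ('1.19.13', '1.18.1')
assert pick_host_versions('1.19.13', '1.18.1', True,
                          KUBE_HOST_UPGRADING_KUBELET,
                          'v1.19.13') == ('1.19.13', '1.19.13')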


@@ -263,6 +263,11 @@ class TestPostKubeUpgrade(TestKubeUpgrade,
self.assertEqual(result.json['to_version'], 'v1.43.2')
self.assertEqual(result.json['state'],
kubernetes.KUBE_UPGRADE_STARTED)
# Verify that kubeadm_version was updated in the DB
kube_cmd_version = self.dbapi.kube_cmd_version_get()
self.assertEqual(kube_cmd_version.kubeadm_version, '1.43.2')
# Verify that the target version for the host was updated
kube_host_upgrade = self.dbapi.kube_host_upgrade_get_by_host(
self.host.id)
@@ -730,12 +735,17 @@ class TestPatch(TestKubeUpgrade,
'value': new_state,
'op': 'replace'}],
headers={'User-Agent': 'sysinv-test'})
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.json['from_version'], 'v1.43.1')
self.assertEqual(response.json['to_version'], 'v1.43.2')
self.assertEqual(response.json['state'], new_state)
# Verify that kubelet_version was updated in the DB
kube_cmd_version = self.dbapi.kube_cmd_version_get()
self.assertEqual(kube_cmd_version.kubelet_version, '1.43.2')
# Verify that the upgrade was updated with the new state
result = self.get_json('/kube_upgrade/%s' % uuid)
self.assertEqual(result['from_version'], 'v1.43.1')


@@ -866,12 +866,12 @@ class TestKubeOperator(base.TestCase):
result = self.kube_operator.kube_get_version_states()
assert result == {'v1.41.3': 'available',
'v1.42.0': 'available',
assert result == {'v1.41.3': 'unavailable',
'v1.42.0': 'unavailable',
'v1.42.1': 'partial',
'v1.42.3': 'available',
'v1.42.3': 'unavailable',
'v1.42.4': 'partial',
'v1.43.1': 'available'}
'v1.43.1': 'unavailable'}
def test_kube_get_version_states_active(self):
@@ -881,12 +881,12 @@ class TestKubeOperator(base.TestCase):
"v1.42.1"
result = self.kube_operator.kube_get_version_states()
assert result == {'v1.41.3': 'available',
'v1.42.0': 'available',
assert result == {'v1.41.3': 'unavailable',
'v1.42.0': 'unavailable',
'v1.42.1': 'active',
'v1.42.3': 'available',
'v1.42.4': 'available',
'v1.43.1': 'available'}
'v1.43.1': 'unavailable'}
def test_kube_get_version_states_multi_node(self):
@@ -894,12 +894,12 @@ class TestKubeOperator(base.TestCase):
self.list_node_result = self.multi_node_result
result = self.kube_operator.kube_get_version_states()
assert result == {'v1.41.3': 'available',
'v1.42.0': 'available',
assert result == {'v1.41.3': 'unavailable',
'v1.42.0': 'unavailable',
'v1.42.1': 'partial',
'v1.42.3': 'partial',
'v1.42.4': 'partial',
'v1.43.1': 'available'}
'v1.43.1': 'unavailable'}
def test_kube_get_version_states_ignore_unknown_version(self):
@@ -911,12 +911,12 @@ class TestKubeOperator(base.TestCase):
"v1.49.1"
result = self.kube_operator.kube_get_version_states()
assert result == {'v1.41.3': 'available',
'v1.42.0': 'available',
assert result == {'v1.41.3': 'unavailable',
'v1.42.0': 'unavailable',
'v1.42.1': 'active',
'v1.42.3': 'available',
'v1.42.4': 'available',
'v1.43.1': 'available'}
'v1.43.1': 'unavailable'}
def test_kube_get_kubernetes_version(self):


@@ -345,6 +345,14 @@ def get_test_kube_rootca_host_update(**kw):
return rootca_host_update
def update_kube_host_upgrade(**kw):
dbapi = db_api.get_instance()
host_upgrade = dbapi.kube_host_upgrade_get_by_host(1)
host_upgrade = dbapi.kube_host_upgrade_update(
host_upgrade.id, kw)
return host_upgrade
def create_test_kube_upgrade(**kw):
upgrade = get_test_kube_upgrade(**kw)
@@ -356,7 +364,11 @@ def create_test_kube_upgrade(**kw):
del upgrade['uuid']
dbapi = db_api.get_instance()
return dbapi.kube_upgrade_create(upgrade)
kube_upgrade = dbapi.kube_upgrade_create(upgrade)
# Also update the kubeadm version, as the API does.
dbapi.kube_cmd_version_update(
{"kubeadm_version": kube_upgrade.to_version.lstrip("v")})
return kube_upgrade
def create_test_kube_host_upgrade():


@@ -11,10 +11,12 @@ import uuid
from sysinv.common import utils
from sysinv.common import constants
from sysinv.common import device as dconstants
from sysinv.common.kubernetes import KUBERNETES_DEFAULT_VERSION
from sysinv.common import kubernetes
from sysinv.puppet import interface
from sysinv.puppet import puppet
from sysinv.tests.db import base as dbbase
from sysinv.tests.db import utils as dbutils
from sysinv.tests.puppet import base
from sysinv.tests.puppet import test_interface
@@ -260,11 +262,66 @@ class SriovdpTestCase(test_interface.InterfaceTestCaseMixin, dbbase.BaseHostTestCase):
}
self.assertEqual(expected, actual)
class KubeVersionTestCase(base.PuppetTestCaseMixin, dbbase.BaseHostTestCase):
def setUp(self):
super(KubeVersionTestCase, self).setUp()
# Create a host
self.host = self._create_test_host(constants.WORKER)
self._update_context()
@puppet.puppet_context
def _update_context(self):
self.context = {}
def test_kubernetes_versions_in_hieradata(self):
config = self.operator.kubernetes._get_kubeadm_kubelet_version()
config = self.operator.kubernetes._get_kubeadm_kubelet_version(self.host)
kubeadm_version = config.get("platform::kubernetes::params::kubeadm_version")
kubelet_version = config.get("platform::kubernetes::params::kubelet_version")
self.assertEqual(kubeadm_version, KUBERNETES_DEFAULT_VERSION)
self.assertEqual(kubelet_version, KUBERNETES_DEFAULT_VERSION)
self.assertEqual(kubeadm_version, kubernetes.KUBERNETES_DEFAULT_VERSION)
self.assertEqual(kubelet_version, kubernetes.KUBERNETES_DEFAULT_VERSION)
def test_kubernetes_versions_in_hieradata_upgrade_started(self):
dbutils.create_test_kube_upgrade(
from_version=kubernetes.KUBERNETES_DEFAULT_VERSION,
to_version='v1.19.13',
state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
)
dbutils.update_kube_host_upgrade(
target_version='v1.19.13',
status=kubernetes.KUBE_HOST_UPGRADING_CONTROL_PLANE,
)
config = self.operator.kubernetes._get_kubeadm_kubelet_version(self.host)
kubeadm_version = config.get("platform::kubernetes::params::kubeadm_version")
kubelet_version = config.get("platform::kubernetes::params::kubelet_version")
self.assertEqual(kubeadm_version, '1.19.13')
self.assertEqual(kubelet_version, kubernetes.KUBERNETES_DEFAULT_VERSION)
def test_kubernetes_versions_in_hieradata_upgrade_kubelet(self):
dbutils.create_test_kube_upgrade(
from_version=kubernetes.KUBERNETES_DEFAULT_VERSION,
to_version='v1.19.13',
state=kubernetes.KUBE_UPGRADING_KUBELETS,
)
dbutils.update_kube_host_upgrade(
target_version='v1.19.13',
status=kubernetes.KUBE_HOST_UPGRADING_KUBELET,
)
config = self.operator.kubernetes._get_kubeadm_kubelet_version(self.host)
kubeadm_version = config.get("platform::kubernetes::params::kubeadm_version")
kubelet_version = config.get("platform::kubernetes::params::kubelet_version")
self.assertEqual(kubeadm_version, '1.19.13')
self.assertEqual(kubelet_version, '1.19.13')