Debian: Add kubernetes 1.24.4 remaining patches
This ports the remaining Kubernetes 1.24.4 patches.

The following patches were refactored slightly to allow for upstream changes:
- kubelet-cpumanager-disable-CFS-quota-throttling-for-.patch
- kubelet-cpumanager-keep-normal-containers-off-reserv.patch

The following patch was added to get tests working again:
- cpumanager-policy-static-test-refactor.patch

Test plan:
1. Revert-use-subpath-for-coredns-only-for-default-repo: kubeadm commands worked as expected.
2. enable-support-for-kubernetes-to-ignore-isolcpus: Set kube-cpu-mgr-policy to static and allocated some isolcpus.
   i. Deployed a pod with dedicated CPUs and verified that it is not affined to isolcpus.
   ii. With kube-ignore-isol-cpus enabled, deployed a pod with dedicated CPUs and verified that it is allocated to isolated CPUs.
3. kubeadm-create-platform-pods-with-zero-CPU-resources: Verified that CPU usage is 0 in the coredns, kube-controller-manager, kube-scheduler, and kube-apiserver pods of the kube-system namespace.
4. kubelet-cpumanager-disable-CFS-quota-throttling-for-: Verified that pods in the "Guaranteed" QoS class, on hosts with kube-cpu-mgr-policy=static, have cpu.cfs_quota_us set to -1.
5. kubelet-cpumanager-infra-pods-use-system-reserved-CP: Verified that platform pods are affined to platform CPUs.
6. kubelet-cpumanager-introduce-concept-of-isolated-CPU: Verified that pods can allocate isolated CPUs and are affined to them. Verified that pods allocating application CPUs don't get isolated CPUs. Verified that pods allocating dedicated and isolated CPUs are affined to the dedicated CPUs. Verified that pods allocating non-dedicated and isolated CPUs are affined to the isolated CPUs.
7. kubelet-cpumanager-keep-normal-containers-off-reserv: Verified that pods outside the platform namespaces are affined to application or application-isolated CPUs.
8. kubelet-sort-isolcpus-allocation-when-SMT-enabled: Verified, with SMT multithreading enabled, that isolated CPUs are allocated as lowest-numbered SMT siblings first, then higher-numbered SMT siblings, then any single thread.
9. kubernetes-make-isolcpus-allocation-SMT-aware: Verified, with SMT multithreading enabled, that isolated CPUs are allocated as pairs of SMT siblings first, then already-existing single SMT siblings, and finally one of a pair of SMT siblings as a last resort.

Story: 2010301
Task: 46315

Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>
Signed-off-by: Chris Friesen <chris.friesen@windriver.com>
Change-Id: Ic8f3d53f58f09ae13f9c299fb31e5f91a0a5bc9f
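For reference, the infrastructure-pod handling exercised by test items 5 and 7 comes down to a namespace allow-list checked by the static CPU manager policy. The sketch below mirrors the infraNamespaces list and isKubeInfra helper added in the policy_static.go patch further down; the standalone package wrapper and the example in main are illustrative only, not part of the patches:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Namespaces treated as platform infrastructure (taken from the patch below).
var infraNamespaces = [...]string{
	"kube-system", "armada", "cert-manager", "platform-deployment-manager",
	"portieris", "vault", "notification", "flux-helm", "metrics-server",
}

// isKubeInfra reports whether a pod lives in one of the platform namespaces.
// Pods that match are pinned to the reserved (platform) cpuset instead of
// receiving exclusive CPUs from the shared pool.
func isKubeInfra(pod *v1.Pod) bool {
	for _, namespace := range infraNamespaces {
		if namespace == pod.Namespace {
			return true
		}
	}
	return false
}

func main() {
	pod := &v1.Pod{}
	pod.Namespace = "kube-system"
	fmt.Println(isKubeInfra(pod)) // true: this pod would be affined to reserved CPUs
}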
Parent: bced15251c  Commit: d0e346c423
@@ -0,0 +1,85 @@
From 9ede7db445ff799d78fae4c20d9558962573ede7 Mon Sep 17 00:00:00 2001
From: Sachin Gopala Krishna <saching.krishna@windriver.com>
Date: Tue, 11 Oct 2022 09:09:00 -0400
Subject: [PATCH] cpumanager policy static test refactor

This refactors the tests which were breaking due to changes in isolated
CPUs and reserved CPUs.

Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>

---
 pkg/kubelet/cm/cpumanager/cpu_manager_test.go   | 11 ++++++++---
 pkg/kubelet/cm/cpumanager/policy_static_test.go |  5 +++--
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index 31e4d0585fb..87c4ae036ca 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -715,6 +715,8 @@ func TestCPUManagerRemove(t *testing.T) {
}

func TestReconcileState(t *testing.T) {
+ testExcl := false
+ testDM, _ := devicemanager.NewManagerStub()
testPolicy, _ := NewStaticPolicy(
&topology.CPUTopology{
NumCPUs: 8,
@@ -733,8 +735,11 @@ func TestReconcileState(t *testing.T) {
},
0,
cpuset.NewCPUSet(),
+ cpuset.NewCPUSet(),
topologymanager.NewFakeManager(),
- nil)
+ nil,
+ testDM,
+ testExcl)

testCases := []struct {
description string
@@ -1369,8 +1374,8 @@ func TestCPUManagerHandlePolicyOptions(t *testing.T) {
t.Errorf("cannot create state file: %s", err.Error())
}
defer os.RemoveAll(sDir)
-
- _, err = NewManager(testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.NewCPUSet(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
+ testDM, err := devicemanager.NewManagerStub()
+ _, err = NewManager(testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.NewCPUSet(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager(), testDM)
if err == nil {
t.Errorf("Expected error, but NewManager succeeded")
}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 39aaaf95b28..d0308556c6d 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -202,7 +202,6 @@ func TestStaticPolicyAdd(t *testing.T) {
largeTopoCPUSet := largeTopoBuilder.Result()
largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
- testDM, _ := devicemanager.NewManagerStub()
// these are the cases which must behave the same regardless the policy options.
// So we will permutate the options to ensure this holds true.
optionsInsensitiveTestCases := []staticPolicyTest{
@@ -576,6 +575,8 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
}

func TestStaticPolicyReuseCPUs(t *testing.T) {
+ excludeReserved := false
+ testDM, _ := devicemanager.NewManagerStub()
testCases := []struct {
staticPolicyTest
expCSetAfterAlloc cpuset.CPUSet
@@ -692,7 +693,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}

for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, excludeReserved)

st := &mockState{
assignments: testCase.stAssignments,
--
2.25.1

@@ -0,0 +1,79 @@
From 922275232089538f73c96825f8036de9aa9da9a1 Mon Sep 17 00:00:00 2001
From: Chris Friesen <chris.friesen@windriver.com>
Date: Fri, 23 Oct 2020 17:46:10 -0600
Subject: [PATCH] enable support for kubernetes to ignore isolcpus

The normal mechanisms for allocating isolated CPUs do not allow
a mix of isolated and exclusive CPUs in the same container. In
order to allow this in *very* limited cases where the pod spec
is known in advance we will add the ability to disable the normal
isolcpus behaviour.

If the file "/etc/kubernetes/ignore_isolcpus" exists, then kubelet
will basically forget everything it knows about isolcpus and just
treat them like regular CPUs.

The admin user can then rely on the fact that CPU allocation is
deterministic to ensure that the isolcpus they configure end up being
allocated to the correct pods.

Signed-off-by: Daniel Safta <daniel.safta@windriver.com>
---
 pkg/kubelet/cm/cpumanager/cpu_manager.go   | 8 ++++++++
 pkg/kubelet/cm/cpumanager/policy_static.go | 7 +++++++
 2 files changed, 15 insertions(+)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index ea466dbcd37..0ca3b2666b4 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io/ioutil"
"math"
+ "os"
"strings"
"sync"
"time"
@@ -55,6 +56,13 @@ const cpuManagerStateFileName = "cpu_manager_state"

// get the system-level isolated CPUs
func getIsolcpus() cpuset.CPUSet {
+ // This is a gross hack to basically turn off awareness of isolcpus to enable
+ // isolated cpus to be allocated to pods the same way as non-isolated CPUs.
+ if _, err := os.Stat("/etc/kubernetes/ignore_isolcpus"); err == nil {
+ klog.Infof("[cpumanager] turning off isolcpus awareness")
+ return cpuset.NewCPUSet()
+ }
+
dat, err := ioutil.ReadFile("/sys/devices/system/cpu/isolated")
if err != nil {
klog.Errorf("[cpumanager] unable to read sysfs isolcpus subdir")
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index d6fe69e7165..ce82250ee5e 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -18,6 +18,7 @@ package cpumanager

import (
"fmt"
+ "os"
"strconv"

v1 "k8s.io/api/core/v1"
@@ -680,6 +681,12 @@ func isKubeInfra(pod *v1.Pod) bool {

// get the isolated CPUs (if any) from the devices associated with a specific container
func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
+ // This is a gross hack to basically turn off awareness of isolcpus to enable
+ // isolated cpus to be allocated to pods the same way as non-isolated CPUs.
+ if _, err := os.Stat("/etc/kubernetes/ignore_isolcpus"); err == nil {
+ return cpuset.NewCPUSet()
+ }
+
// NOTE: This is required for TestStaticPolicyAdd() since makePod() does
// not create UID. We also need a way to properly stub devicemanager.
if len(string(pod.UID)) == 0 {
--
2.25.1

@@ -1,7 +1,7 @@
 From de653bd0823b248d623a39c17a3872e85ce952b0 Mon Sep 17 00:00:00 2001
 From: Chris Friesen <chris.friesen@windriver.com>
 Date: Fri, 3 Sep 2021 18:05:15 -0400
-Subject: [PATCH 5/7] kubeadm: create platform pods with zero CPU resources
+Subject: [PATCH] kubeadm: create platform pods with zero CPU resources
 
 We want to specify zero CPU resources when creating the manifests
 for the static platform pods, as a workaround for the lack of
@@ -0,0 +1,255 @@
From 95e547b2d3d0af6b0f2083c064bcbdbe39716250 Mon Sep 17 00:00:00 2001
From: Sachin Gopala Krishna <saching.krishna@windriver.com>
Date: Mon, 3 Oct 2022 19:19:48 -0400
Subject: [PATCH] kubelet cpumanager disable CFS quota throttling

This disables CFS CPU quota to avoid performance degradation due to
Linux kernel CFS quota implementation. Note that 4.18 kernel attempts
to solve the CFS throttling problem, but there are reports that it is
not completely effective.

This disables CFS quota throttling for Guaranteed pods for both
parent and container cgroups by writing -1 to cgroup cpu.cfs_quota_us.
Disabling has a dramatic latency improvement for HTTP response times.

This patch is refactored in 1.22.5 due to new internal_container_lifecycle
framework. We leverage the same mechanism to set Linux resources as:
cpu manager: specify the container CPU set during the creation

Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>
---
 pkg/kubelet/cm/cpumanager/cpu_manager.go      |  7 +++
 pkg/kubelet/cm/cpumanager/fake_cpu_manager.go | 10 ++++-
 pkg/kubelet/cm/helpers_linux.go               | 10 +++++
 pkg/kubelet/cm/helpers_linux_test.go          | 43 ++++++++++---------
 .../cm/internal_container_lifecycle_linux.go  |  9 ++++
 5 files changed, 57 insertions(+), 22 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index dde49b6ec8c..df431b06601 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -72,6 +72,9 @@ type Manager interface {
// State returns a read-only interface to the internal CPU manager state.
State() state.Reader

+ // GetCPUPolicy returns the assigned CPU manager policy
+ GetCPUPolicy() string
+
// GetTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
@@ -314,6 +317,10 @@ func (m *manager) State() state.Reader {
return m.state
}

+func (m *manager) GetCPUPolicy() string {
+ return m.policy.Name()
+}
+
func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
// The pod is during the admission phase. We need to save the pod to avoid it
// being cleaned before the admission ended
diff --git a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
index 93369705135..2e277da9c84 100644
--- a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
@@ -28,7 +28,8 @@ import (
)

type fakeManager struct {
- state state.State
+ policy Policy
+ state state.State
}

func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
@@ -70,6 +71,10 @@ func (m *fakeManager) State() state.Reader {
return m.state
}

+func (m *fakeManager) GetCPUPolicy() string {
+ return m.policy.Name()
+}
+
func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName)
return cpuset.CPUSet{}
@@ -88,6 +93,7 @@ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
// NewFakeManager creates empty/fake cpu manager
func NewFakeManager() Manager {
return &fakeManager{
- state: state.NewMemoryState(),
+ policy: &nonePolicy{},
+ state: state.NewMemoryState(),
}
}
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 25ff3f13b82..e9ea6bab8dc 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -182,6 +182,16 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
// build the result
result := &ResourceConfig{}
if qosClass == v1.PodQOSGuaranteed {
+ // Disable CFS CPU quota to avoid performance degradation due to
+ // Linux kernel CFS throttle implementation.
+ // NOTE: 4.18 kernel attempts to solve CFS throttling problem,
+ // but there are reports that it is not completely effective.
+ // This will configure cgroup CFS parameters at pod level:
+ // /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/cpu.cfs_quota_us
+ // /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/cpu.cfs_period_us
+ cpuQuota = int64(-1)
+ cpuPeriod = uint64(100000)
+
result.CpuShares = &cpuShares
result.CpuQuota = &cpuQuota
result.CpuPeriod = &cpuPeriod
diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go
index 101b21e682a..9b98fb7e1c1 100644
--- a/pkg/kubelet/cm/helpers_linux_test.go
+++ b/pkg/kubelet/cm/helpers_linux_test.go
@@ -64,8 +64,9 @@ func TestResourceConfigForPod(t *testing.T) {
burstablePartialShares := MilliCPUToShares(200)
burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
guaranteedShares := MilliCPUToShares(100)
- guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
- guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
+ guaranteedQuotaPeriod := uint64(100000)
+ guaranteedQuota := int64(-1)
+ guaranteedTunedQuota := int64(-1)
memoryQuantity = resource.MustParse("100Mi")
cpuNoLimit := int64(-1)
guaranteedMemory := memoryQuantity.Value()
@@ -204,8 +205,8 @@ func TestResourceConfigForPod(t *testing.T) {
},
},
enforceCPULimits: true,
- quotaPeriod: defaultQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-no-cpu-enforcement": {
pod: &v1.Pod{
@@ -218,8 +219,8 @@ func TestResourceConfigForPod(t *testing.T) {
},
},
enforceCPULimits: false,
- quotaPeriod: defaultQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-with-tuned-quota": {
pod: &v1.Pod{
@@ -232,8 +233,8 @@ func TestResourceConfigForPod(t *testing.T) {
},
},
enforceCPULimits: true,
- quotaPeriod: tunedQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-no-cpu-enforcement-with-tuned-quota": {
pod: &v1.Pod{
@@ -246,8 +247,8 @@ func TestResourceConfigForPod(t *testing.T) {
},
},
enforceCPULimits: false,
- quotaPeriod: tunedQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"burstable-partial-limits-with-init-containers": {
pod: &v1.Pod{
@@ -309,8 +310,10 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
burstablePartialShares := MilliCPUToShares(200)
burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
guaranteedShares := MilliCPUToShares(100)
- guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
- guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
+ guaranteedQuotaPeriod := uint64(100000)
+ guaranteedQuota := int64(-1)
+ guaranteedTunedQuota := int64(-1)
+
memoryQuantity = resource.MustParse("100Mi")
cpuNoLimit := int64(-1)
guaranteedMemory := memoryQuantity.Value()
@@ -449,8 +452,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
},
},
enforceCPULimits: true,
- quotaPeriod: defaultQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-no-cpu-enforcement": {
pod: &v1.Pod{
@@ -463,8 +466,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
},
},
enforceCPULimits: false,
- quotaPeriod: defaultQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-with-tuned-quota": {
pod: &v1.Pod{
@@ -477,8 +480,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
},
},
enforceCPULimits: true,
- quotaPeriod: tunedQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-no-cpu-enforcement-with-tuned-quota": {
pod: &v1.Pod{
@@ -491,8 +494,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
},
},
enforceCPULimits: false,
- quotaPeriod: tunedQuotaPeriod,
- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+ quotaPeriod: guaranteedQuotaPeriod,
+ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
},
}

diff --git a/pkg/kubelet/cm/internal_container_lifecycle_linux.go b/pkg/kubelet/cm/internal_container_lifecycle_linux.go
index cb7c0cfa543..75406dd8564 100644
--- a/pkg/kubelet/cm/internal_container_lifecycle_linux.go
+++ b/pkg/kubelet/cm/internal_container_lifecycle_linux.go
@@ -25,6 +25,7 @@ import (

"k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+ v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
)

func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
@@ -35,6 +36,14 @@ func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, contain
}
}

+ // Disable cgroup CFS throttle at the container level.
+ // /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/<container>/cpu.cfs_quota_us
+ // /sys/fs/cgroup/cpu/k8s-infra/kubepods/<pod>/<container>/cpu.cfs_period_us
+ if i.cpuManager.GetCPUPolicy() == "static" && v1qos.GetPodQOS(pod) == v1.PodQOSGuaranteed {
+ containerConfig.Linux.Resources.CpuPeriod = int64(100000)
+ containerConfig.Linux.Resources.CpuQuota = int64(-1)
+ }
+
if i.memoryManager != nil {
numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container)
if numaNodes.Len() > 0 {
--
2.25.1

@@ -0,0 +1,150 @@
From 70399c6eebe5216332e77b0f56ace7028168c726 Mon Sep 17 00:00:00 2001
From: Gleb Aronsky <gleb.aronsky@windriver.com>
Date: Mon, 21 Mar 2022 17:25:07 -0300
Subject: [PATCH] kubelet cpumanager infra pods use system reserved CPUs

This assigns system infrastructure pods to the "reserved" cpuset
to isolate them from the shared pool of CPUs.

Infrastructure pods include any pods that belong to the kube-system,
armada, cert-manager, vault, platform-deployment-manager, portieris,
notification, flux-helm or metrics-server namespaces.

The implementation is a bit simplistic, it is assumed that the
"reserved" cpuset is large enough to handle all infrastructure pods
CPU allocations.

This also prevents infrastucture pods from using Guaranteed resources.

Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
Signed-off-by: Thiago Miranda <ThiagoOliveira.Miranda@windriver.com>
Signed-off-by: Kaustubh Dhokte <kaustubh.dhokte@windriver.com>
---
 pkg/kubelet/cm/cpumanager/policy_static.go      | 47 +++++++++++++++++--
 .../cm/cpumanager/policy_static_test.go         | 19 +++++++-
 2 files changed, 61 insertions(+), 5 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index 09e0fc0ea0e..a3c93a896df 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -53,6 +53,11 @@ func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}

+// Define namespaces used by platform infrastructure pods
+var infraNamespaces = [...]string{
+ "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm", "metrics-server",
+}
+
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
@@ -121,10 +126,11 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
klog.InfoS("Static policy created with configuration", "options", opts)

policy := &staticPolicy{
- topology: topology,
- affinity: affinity,
- cpusToReuse: make(map[string]cpuset.CPUSet),
- options: opts,
+ topology: topology,
+ affinity: affinity,
+ excludeReserved: excludeReserved,
+ cpusToReuse: make(map[string]cpuset.CPUSet),
+ options: opts,
}

allCPUs := topology.CPUDetails.CPUs()
@@ -263,6 +269,25 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}

func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
+ // Process infra pods before guaranteed pods
+ if isKubeInfra(pod) {
+ // Container belongs in reserved pool.
+ // We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error.
+ if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
+ klog.Infof("[cpumanager] static policy: reserved container already present in state, skipping (namespace: %s, pod UID: %s, pod: %s, container: %s)", pod.Namespace, string(pod.UID), pod.Name, container.Name)
+ return nil
+ }
+
+ cpuset := p.reserved
+ if cpuset.IsEmpty() {
+ // If this happens then someone messed up.
+ return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved)
+ }
+ s.SetCPUSet(string(pod.UID), container.Name, cpuset)
+ klog.Infof("[cpumanager] static policy: reserved: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset)
+ return nil
+ }
+
if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
klog.InfoS("Static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
// container belongs in an exclusively allocated pool
@@ -382,6 +407,10 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
if cpuQuantity.Value()*1000 != cpuQuantity.MilliValue() {
return 0
}
+ // Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class
+ if isKubeInfra(pod) {
+ return 0
+ }
// Safe downcast to do for all systems with < 2.1 billion CPUs.
// Per the language spec, `int` is guaranteed to be at least 32 bits wide.
// https://golang.org/ref/spec#Numeric_types
@@ -595,3 +624,13 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu

return hints
}
+
+// check if a given pod is in a platform infrastructure namespace
+func isKubeInfra(pod *v1.Pod) bool {
+ for _, namespace := range infraNamespaces {
+ if namespace == pod.Namespace {
+ return true
+ }
+ }
+ return false
+}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 81251e576fd..d4b4b790210 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -886,7 +886,8 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}

func TestStaticPolicyAddWithResvList(t *testing.T) {
-
+ infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m")
+ infraPod.Namespace = "kube-system"
testCases := []staticPolicyTestWithResvList{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
@@ -928,6 +929,22 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5),
},
+ {
+ description: "InfraPod, SingleSocketHT, ExpectAllocReserved",
+ topo: topoSingleSocketHT,
+ numReservedCPUs: 2,
+ reserved: cpuset.NewCPUSet(0, 1),
+ stAssignments: state.ContainerCPUAssignments{
+ "fakePod": map[string]cpuset.CPUSet{
+ "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
+ },
+ },
+ stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
+ pod: infraPod,
+ expErr: nil,
+ expCPUAlloc: true,
+ expCSet: cpuset.NewCPUSet(0, 1),
+ },
}

testExcl := true
--
2.25.1

@ -0,0 +1,562 @@
|
||||
From ae7fc7b39bfde784340068b388a13a28b4e76398 Mon Sep 17 00:00:00 2001
|
||||
From: Gleb Aronsky <gleb.aronsky@windriver.com>
|
||||
Date: Tue, 25 Jan 2022 13:27:25 -0500
|
||||
Subject: [PATCH] kubelet cpumanager introduce concept of isolated CPUs
|
||||
|
||||
This introduces the concept of "isolated CPUs", which are CPUs that
|
||||
have been isolated at the kernel level via the "isolcpus" kernel boot
|
||||
parameter.
|
||||
|
||||
When starting the kubelet process, two separate sets of reserved CPUs
|
||||
may be specified. With this change CPUs reserved via
|
||||
'--system-reserved=cpu' will be used for infrastructure pods while the
|
||||
isolated CPUs should be reserved via '--kube-reserved=cpu' to cause
|
||||
kubelet to skip over them for "normal" CPU resource tracking. The
|
||||
kubelet code will double-check that the specified isolated CPUs match
|
||||
what the kernel exposes in "/sys/devices/system/cpu/isolated".
|
||||
|
||||
A plugin (outside the scope of this commit) will expose the isolated
|
||||
CPUs to kubelet via the device plugin API.
|
||||
|
||||
If a pod specifies some number of "isolcpus" resources, the device
|
||||
manager will allocate them. In this code we check whether such
|
||||
resources have been allocated, and if so we set the container cpuset to
|
||||
the isolated CPUs. This does mean that it really only makes sense to
|
||||
specify "isolcpus" resources for best-effort or burstable pods, not for
|
||||
guaranteed ones since that would throw off the accounting code. In
|
||||
order to ensure the accounting still works as designed, if "isolcpus"
|
||||
are specified for guaranteed pods, the affinity will be set to the
|
||||
non-isolated CPUs.
|
||||
|
||||
This patch was refactored in 1.21.3 due to upstream API change
|
||||
node: podresources: make GetDevices() consistent
|
||||
(commit ad68f9588c72d6477b5a290c548a9031063ac659).
|
||||
|
||||
The routine podIsolCPUs() was refactored in 1.21.3 since the API
|
||||
p.deviceManager.GetDevices() is returning multiple devices with
|
||||
a device per cpu. The resultant cpuset needs to be the aggregate.
|
||||
|
||||
The routine NewStaticPolicy was refactored in 1.22.5, adding a new argument
|
||||
in its signature: cpuPolicyOptions map[string]string. This change is implies
|
||||
shifting the new arguments(deviceManager, excludeReserved) with one position
|
||||
to the right.
|
||||
|
||||
Co-authored-by: Jim Gauld <james.gauld@windriver.com>
|
||||
Co-authored-by: Chris Friesen <chris.friesen@windriver.com>
|
||||
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
|
||||
---
|
||||
pkg/kubelet/cm/container_manager_linux.go | 1 +
|
||||
pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++++++-
|
||||
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 14 +++-
|
||||
pkg/kubelet/cm/cpumanager/policy_static.go | 83 +++++++++++++++++--
|
||||
.../cm/cpumanager/policy_static_test.go | 50 ++++++++---
|
||||
5 files changed, 164 insertions(+), 19 deletions(-)
|
||||
|
||||
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
|
||||
index 0f09f3eb331..770922ca55d 100644
|
||||
--- a/pkg/kubelet/cm/container_manager_linux.go
|
||||
+++ b/pkg/kubelet/cm/container_manager_linux.go
|
||||
@@ -321,6 +321,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
|
||||
cm.GetNodeAllocatableReservation(),
|
||||
nodeConfig.KubeletRootDir,
|
||||
cm.topologyManager,
|
||||
+ cm.deviceManager,
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to initialize cpu manager")
|
||||
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
|
||||
index 884c7323a79..ea466dbcd37 100644
|
||||
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
|
||||
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
|
||||
@@ -18,7 +18,9 @@ package cpumanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
+ "io/ioutil"
|
||||
"math"
|
||||
+ "strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -32,6 +34,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
+ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
@@ -50,6 +53,25 @@ type policyName string
|
||||
// cpuManagerStateFileName is the file name where cpu manager stores its state
|
||||
const cpuManagerStateFileName = "cpu_manager_state"
|
||||
|
||||
+// get the system-level isolated CPUs
|
||||
+func getIsolcpus() cpuset.CPUSet {
|
||||
+ dat, err := ioutil.ReadFile("/sys/devices/system/cpu/isolated")
|
||||
+ if err != nil {
|
||||
+ klog.Errorf("[cpumanager] unable to read sysfs isolcpus subdir")
|
||||
+ return cpuset.NewCPUSet()
|
||||
+ }
|
||||
+
|
||||
+ // The isolated cpus string ends in a newline
|
||||
+ cpustring := strings.TrimSuffix(string(dat), "\n")
|
||||
+ cset, err := cpuset.Parse(cpustring)
|
||||
+ if err != nil {
|
||||
+ klog.Errorf("[cpumanager] unable to parse sysfs isolcpus string to cpuset")
|
||||
+ return cpuset.NewCPUSet()
|
||||
+ }
|
||||
+
|
||||
+ return cset
|
||||
+}
|
||||
+
|
||||
// Manager interface provides methods for Kubelet to manage pod cpus.
|
||||
type Manager interface {
|
||||
// Start is called during Kubelet initialization.
|
||||
@@ -153,7 +175,8 @@ func (s *sourcesReadyStub) AddSource(source string) {}
|
||||
func (s *sourcesReadyStub) AllReady() bool { return true }
|
||||
|
||||
// NewManager creates new cpu manager based on provided policy
|
||||
-func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
|
||||
+func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store, deviceManager devicemanager.Manager) (Manager, error) {
|
||||
+
|
||||
var topo *topology.CPUTopology
|
||||
var policy Policy
|
||||
var err error
|
||||
@@ -194,7 +217,15 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
|
||||
// NOTE: Set excludeReserved unconditionally to exclude reserved CPUs from default cpuset.
|
||||
// This variable is primarily to make testing easier.
|
||||
excludeReserved := true
|
||||
- policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions, excludeReserved)
|
||||
+
|
||||
+ // isolCPUs is the set of kernel-isolated CPUs. They should be a subset of specificCPUs or
|
||||
+ // of the CPUs that NewStaticPolicy() will pick if numReservedCPUs is set. It's only in the
|
||||
+ // argument list here for ease of testing, it's really internal to the policy.
|
||||
+ isolCPUs := getIsolcpus()
|
||||
+ policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, isolCPUs, affinity, cpuPolicyOptions, deviceManager, excludeReserved)
|
||||
+ if err != nil {
|
||||
+ return nil, fmt.Errorf("new static policy error: %v", err)
|
||||
+ }
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new static policy error: %w", err)
|
||||
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
|
||||
index 2c8349662c4..31e4d0585fb 100644
|
||||
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
|
||||
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
+ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
|
||||
)
|
||||
|
||||
@@ -215,6 +216,7 @@ func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string })
|
||||
}
|
||||
|
||||
func TestCPUManagerAdd(t *testing.T) {
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testExcl := false
|
||||
testPolicy, _ := NewStaticPolicy(
|
||||
&topology.CPUTopology{
|
||||
@@ -230,8 +232,10 @@ func TestCPUManagerAdd(t *testing.T) {
|
||||
},
|
||||
0,
|
||||
cpuset.NewCPUSet(),
|
||||
+ cpuset.NewCPUSet(),
|
||||
topologymanager.NewFakeManager(),
|
||||
nil,
|
||||
+ testDM,
|
||||
testExcl)
|
||||
testCases := []struct {
|
||||
description string
|
||||
@@ -482,8 +486,9 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
|
||||
}
|
||||
|
||||
testExcl := false
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
for _, testCase := range testCases {
|
||||
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl)
|
||||
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testExcl)
|
||||
|
||||
mockState := &mockState{
|
||||
assignments: testCase.stAssignments,
|
||||
@@ -638,7 +643,9 @@ func TestCPUManagerGenerate(t *testing.T) {
|
||||
}
|
||||
defer os.RemoveAll(sDir)
|
||||
|
||||
- mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
|
||||
+ testDM, err := devicemanager.NewManagerStub()
|
||||
+ mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager(), testDM)
|
||||
+
|
||||
if testCase.expectedError != nil {
|
||||
if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
|
||||
t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error())
|
||||
@@ -1232,6 +1239,7 @@ func TestReconcileState(t *testing.T) {
|
||||
// the following tests are with --reserved-cpus configured
|
||||
func TestCPUManagerAddWithResvList(t *testing.T) {
|
||||
testExcl := false
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testPolicy, _ := NewStaticPolicy(
|
||||
&topology.CPUTopology{
|
||||
NumCPUs: 4,
|
||||
@@ -1246,8 +1254,10 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
|
||||
},
|
||||
1,
|
||||
cpuset.NewCPUSet(0),
|
||||
+ cpuset.NewCPUSet(),
|
||||
topologymanager.NewFakeManager(),
|
||||
nil,
|
||||
+ testDM,
|
||||
testExcl)
|
||||
testCases := []struct {
|
||||
description string
|
||||
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
|
||||
index a3c93a896df..d6fe69e7165 100644
|
||||
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
|
||||
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
|
||||
@@ -18,6 +18,7 @@ package cpumanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
+ "strconv"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
+ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
|
||||
)
|
||||
@@ -101,6 +103,10 @@ type staticPolicy struct {
|
||||
topology *topology.CPUTopology
|
||||
// set of CPUs that is not available for exclusive assignment
|
||||
reserved cpuset.CPUSet
|
||||
+ // subset of reserved CPUs with isolcpus attribute
|
||||
+ isolcpus cpuset.CPUSet
|
||||
+ // parent containerManager, used to get device list
|
||||
+ deviceManager devicemanager.Manager
|
||||
// If true, default CPUSet should exclude reserved CPUs
|
||||
excludeReserved bool
|
||||
// topology manager reference to get container Topology affinity
|
||||
@@ -117,7 +123,8 @@ var _ Policy = &staticPolicy{}
|
||||
// NewStaticPolicy returns a CPU manager policy that does not change CPU
|
||||
// assignments for exclusively pinned guaranteed containers after the main
|
||||
// container process starts.
|
||||
-func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, excludeReserved bool) (Policy, error) {
|
||||
+func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, isolCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, deviceManager devicemanager.Manager, excludeReserved bool) (Policy, error) {
|
||||
+
|
||||
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -128,6 +135,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
|
||||
policy := &staticPolicy{
|
||||
topology: topology,
|
||||
affinity: affinity,
|
||||
+ isolcpus: isolCPUs,
|
||||
+ deviceManager: deviceManager,
|
||||
excludeReserved: excludeReserved,
|
||||
cpusToReuse: make(map[string]cpuset.CPUSet),
|
||||
options: opts,
|
||||
@@ -154,6 +163,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
|
||||
klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved)
|
||||
policy.reserved = reserved
|
||||
|
||||
+ if !isolCPUs.IsSubsetOf(reserved) {
|
||||
+ klog.Errorf("[cpumanager] isolCPUs %v is not a subset of reserved %v", isolCPUs, reserved)
|
||||
+ reserved = reserved.Union(isolCPUs)
|
||||
+ klog.Warningf("[cpumanager] mismatch isolCPUs %v, force reserved %v", isolCPUs, reserved)
|
||||
+ }
|
||||
+
|
||||
return policy, nil
|
||||
}
|
||||
|
||||
@@ -187,8 +202,9 @@ func (p *staticPolicy) validateState(s state.State) error {
|
||||
} else {
|
||||
s.SetDefaultCPUSet(allCPUs)
|
||||
}
|
||||
- klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, default:%v\n",
|
||||
- allCPUs, p.reserved, s.GetDefaultCPUSet())
|
||||
+ klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, isolcpus:%v, default:%v\n",
|
||||
+ allCPUs, p.reserved, p.isolcpus, s.GetDefaultCPUSet())
|
||||
+
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -278,10 +294,11 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
|
||||
return nil
|
||||
}
|
||||
|
||||
- cpuset := p.reserved
|
||||
+ cpuset := p.reserved.Clone().Difference(p.isolcpus)
|
||||
if cpuset.IsEmpty() {
|
||||
// If this happens then someone messed up.
|
||||
- return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved)
|
||||
+ return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v, isolcpus:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved, p.isolcpus)
|
||||
+
|
||||
}
|
||||
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
|
||||
klog.Infof("[cpumanager] static policy: reserved: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset)
|
||||
@@ -325,8 +342,34 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
|
||||
}
|
||||
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
|
||||
p.updateCPUsToReuse(pod, container, cpuset)
|
||||
+ klog.Infof("[cpumanager] guaranteed: AddContainer "+
|
||||
+ "(namespace: %s, pod UID: %s, pod: %s, container: %s); numCPUS=%d, cpuset=%v",
|
||||
+ pod.Namespace, string(pod.UID), pod.Name, container.Name, numCPUs, cpuset)
|
||||
+ return nil
|
||||
+ }
|
||||
|
||||
+ if isolcpus := p.podIsolCPUs(pod, container); isolcpus.Size() > 0 {
|
||||
+ // container has requested isolated CPUs
|
||||
+ if set, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
|
||||
+ if set.Equals(isolcpus) {
|
||||
+ klog.Infof("[cpumanager] isolcpus container already present in state, skipping (namespace: %s, pod UID: %s, pod: %s, container: %s)",
|
||||
+ pod.Namespace, string(pod.UID), pod.Name, container.Name)
|
||||
+ return nil
|
||||
+ } else {
|
||||
+ klog.Infof("[cpumanager] isolcpus container state has cpus %v, should be %v (namespace: %s, pod UID: %s, pod: %s, container: %s)",
|
||||
+ isolcpus, set, pod.Namespace, string(pod.UID), pod.Name, container.Name)
|
||||
+ }
|
||||
+ }
|
||||
+ // Note that we do not do anything about init containers here.
|
||||
+ // It looks like devices are allocated per-pod based on effective requests/limits
|
||||
+ // and extra devices from initContainers are not freed up when the regular containers start.
|
||||
+ // TODO: confirm this is still true for 1.20
|
||||
+ s.SetCPUSet(string(pod.UID), container.Name, isolcpus)
|
||||
+ klog.Infof("[cpumanager] isolcpus: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v",
|
||||
+ pod.Namespace, string(pod.UID), pod.Name, container.Name, isolcpus)
|
||||
+ return nil
|
||||
}
|
||||
+
|
||||
// container belongs in the shared pool (nothing to do; use default cpuset)
|
||||
return nil
|
||||
}
|
||||
@@ -634,3 +677,33 @@ func isKubeInfra(pod *v1.Pod) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
+
|
||||
+// get the isolated CPUs (if any) from the devices associated with a specific container
|
||||
+func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet {
|
||||
+ // NOTE: This is required for TestStaticPolicyAdd() since makePod() does
|
||||
+ // not create UID. We also need a way to properly stub devicemanager.
|
||||
+ if len(string(pod.UID)) == 0 {
|
||||
+ return cpuset.NewCPUSet()
|
||||
+ }
|
||||
+ resContDevices := p.deviceManager.GetDevices(string(pod.UID), container.Name)
|
||||
+ cpuSet := cpuset.NewCPUSet()
|
||||
+ for resourceName, resourceDevs := range resContDevices {
|
||||
+ // this resource name needs to match the isolcpus device plugin
|
||||
+ if resourceName == "windriver.com/isolcpus" {
|
||||
+ for devID, _ := range resourceDevs {
|
||||
+ cpuStrList := []string{devID}
|
||||
+ if len(cpuStrList) > 0 {
|
||||
+ // loop over the list of strings, convert each one to int, add to cpuset
|
||||
+ for _, cpuStr := range cpuStrList {
|
||||
+ cpu, err := strconv.Atoi(cpuStr)
|
||||
+ if err != nil {
|
||||
+ panic(err)
|
||||
+ }
|
||||
+ cpuSet = cpuSet.Union(cpuset.NewCPUSet(cpu))
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ return cpuSet
|
||||
+}
|
||||
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
|
||||
index d4b4b790210..ecd3e9598d0 100644
|
||||
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
|
||||
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
+ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
|
||||
)
|
||||
@@ -65,8 +66,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
|
||||
}
|
||||
|
||||
func TestStaticPolicyName(t *testing.T) {
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testExcl := false
|
||||
- policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl)
|
||||
+ policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testExcl)
|
||||
|
||||
policyName := policy.Name()
|
||||
if policyName != "static" {
|
||||
@@ -76,6 +78,7 @@ func TestStaticPolicyName(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStaticPolicyStart(t *testing.T) {
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testCases := []staticPolicyTest{
|
||||
{
|
||||
description: "non-corrupted state",
|
||||
@@ -151,7 +154,7 @@ func TestStaticPolicyStart(t *testing.T) {
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
- p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved)
|
||||
+ p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testCase.excludeReserved)
|
||||
|
||||
policy := p.(*staticPolicy)
|
||||
st := &mockState{
|
||||
@@ -199,7 +202,7 @@ func TestStaticPolicyAdd(t *testing.T) {
|
||||
largeTopoCPUSet := largeTopoBuilder.Result()
|
||||
largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
|
||||
largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
|
||||
-
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
// these are the cases which must behave the same regardless the policy options.
|
||||
// So we will permutate the options to ensure this holds true.
|
||||
optionsInsensitiveTestCases := []staticPolicyTest{
|
||||
@@ -529,8 +532,9 @@ func TestStaticPolicyAdd(t *testing.T) {
|
||||
}
|
||||
|
||||
func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testExcl := false
|
||||
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testExcl)
|
||||
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testDM, testExcl)
|
||||
|
||||
st := &mockState{
|
||||
assignments: testCase.stAssignments,
|
||||
@@ -596,7 +600,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved)
|
||||
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, excludeReserved)
|
||||
|
||||
st := &mockState{
|
||||
assignments: testCase.stAssignments,
|
||||
@@ -629,6 +633,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
|
||||
|
||||
func TestStaticPolicyRemove(t *testing.T) {
|
||||
excludeReserved := false
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testCases := []staticPolicyTest{
|
||||
{
|
||||
description: "SingleSocketHT, DeAllocOneContainer",
|
||||
@@ -710,6 +715,7 @@ func TestStaticPolicyRemove(t *testing.T) {
|
||||
|
||||
func TestTopologyAwareAllocateCPUs(t *testing.T) {
|
||||
excludeReserved := false
|
||||
+ testDM, _ := devicemanager.NewManagerStub()
|
||||
testCases := []struct {
|
||||
description string
|
||||
topo *topology.CPUTopology
|
||||
@@ -778,7 +784,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
- p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, excludeReserved)
|
||||
+ p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, excludeReserved)
|
||||
+
|
||||
policy := p.(*staticPolicy)
|
||||
st := &mockState{
|
||||
assignments: tc.stAssignments,
|
||||
@@ -811,6 +818,7 @@ type staticPolicyTestWithResvList struct {
|
||||
topo *topology.CPUTopology
|
||||
numReservedCPUs int
|
||||
reserved cpuset.CPUSet
|
||||
+ isolcpus cpuset.CPUSet
|
||||
stAssignments state.ContainerCPUAssignments
|
||||
stDefaultCPUSet cpuset.CPUSet
|
||||
pod *v1.Pod
|
||||
@@ -821,6 +829,8 @@ type staticPolicyTestWithResvList struct {
|
||||
}
|
||||
|
||||
func TestStaticPolicyStartWithResvList(t *testing.T) {
+ testDM, _ := devicemanager.NewManagerStub()
+ testExcl := false
testCases := []staticPolicyTestWithResvList{
{
description: "empty cpuset",
@@ -850,11 +860,9 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
- testExcl := false
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
- p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl)
-
+ p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testExcl)
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expNewErr, err)
@@ -894,6 +902,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0),
+ isolcpus: cpuset.NewCPUSet(),
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -906,6 +915,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
+ isolcpus: cpuset.NewCPUSet(),
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -918,6 +928,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
+ isolcpus: cpuset.NewCPUSet(),
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -934,6 +945,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
+ isolcpus: cpuset.NewCPUSet(),
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
@@ -945,11 +957,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(0, 1),
},
+ {
+ description: "InfraPod, SingleSocketHT, Isolcpus, ExpectAllocReserved",
+ topo: topoSingleSocketHT,
+ numReservedCPUs: 2,
+ reserved: cpuset.NewCPUSet(0, 1),
+ isolcpus: cpuset.NewCPUSet(1),
+ stAssignments: state.ContainerCPUAssignments{
+ "fakePod": map[string]cpuset.CPUSet{
+ "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
+ },
+ },
+ stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
+ pod: infraPod,
+ expErr: nil,
+ expCPUAlloc: true,
+ expCSet: cpuset.NewCPUSet(0),
+ },
}

testExcl := true
+ testDM, _ := devicemanager.NewManagerStub()
for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, testCase.isolcpus, topologymanager.NewFakeManager(), nil, testDM, testExcl)

st := &mockState{
assignments: testCase.stAssignments,
--
2.25.1

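The hunks above all follow the same pattern, so for reference, here is an illustrative sketch (not part of any patch) of how a refactored test case constructs the static policy under the extended signature these patches introduce: a separate isolcpus set, a device manager, and an excludeReserved flag. The helper name newTestPolicy is hypothetical; topoSingleSocketHT, cpuset, topologymanager and devicemanager are the kubelet test fixtures and packages already used in the hunks, and the sketch assumes the patched source tree.

// Hypothetical helper, for illustration only; assumes the patched kubelet tree.
func newTestPolicy(t *testing.T) Policy {
	testDM, _ := devicemanager.NewManagerStub() // stub device manager, as in the hunks above
	excludeReserved := false                    // most tests do not exclude reserved CPUs
	p, err := NewStaticPolicy(
		topoSingleSocketHT,               // test CPU topology fixture
		1,                                // number of reserved CPUs
		cpuset.NewCPUSet(),               // explicitly reserved CPUs
		cpuset.NewCPUSet(),               // isolated CPUs (new parameter)
		topologymanager.NewFakeManager(), // fake topology manager
		nil,                              // cpuPolicyOptions
		testDM,                           // device manager (new parameter)
		excludeReserved,                  // exclude-reserved flag (new parameter)
	)
	if err != nil {
		t.Fatalf("NewStaticPolicy failed: %v", err)
	}
	return p
}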
@ -0,0 +1,303 @@
From c827ea3b075774c9c72c33d38c973d05276cb7ac Mon Sep 17 00:00:00 2001
From: Sachin Gopala Krishna <saching.krishna@windriver.com>
Date: Mon, 3 Oct 2022 19:22:14 -0400
Subject: [PATCH] kubelet cpumanager keep normal containers off reserved CPUs

When starting the kubelet process, two separate sets of reserved CPUs
may be specified. With this change CPUs reserved via
'--system-reserved=cpu'
or '--kube-reserved=cpu' will be ignored by kubernetes itself. A small
tweak to the default CPU affinity ensures that "normal" Kubernetes
pods won't run on the reserved CPUs.

Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Signed-off-by: Sachin Gopala Krishna <saching.krishna@windriver.com>

---
pkg/kubelet/cm/cpumanager/cpu_manager.go | 6 +++-
pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 11 ++++--
pkg/kubelet/cm/cpumanager/policy_static.go | 29 +++++++++++----
.../cm/cpumanager/policy_static_test.go | 36 ++++++++++++++-----
4 files changed, 63 insertions(+), 19 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index df431b06601..884c7323a79 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -191,7 +191,11 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
// exclusively allocated.
reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000
numReservedCPUs := int(math.Ceil(reservedCPUsFloat))
- policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions)
+ // NOTE: Set excludeReserved unconditionally to exclude reserved CPUs from default cpuset.
+ // This variable is primarily to make testing easier.
+ excludeReserved := true
+ policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions, excludeReserved)
+
if err != nil {
return nil, fmt.Errorf("new static policy error: %w", err)
}
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index 9b3e24fc3b2..2c8349662c4 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -215,6 +215,7 @@ func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string })
}

func TestCPUManagerAdd(t *testing.T) {
+ testExcl := false
testPolicy, _ := NewStaticPolicy(
&topology.CPUTopology{
NumCPUs: 4,
@@ -230,7 +231,8 @@ func TestCPUManagerAdd(t *testing.T) {
0,
cpuset.NewCPUSet(),
topologymanager.NewFakeManager(),
- nil)
+ nil,
+ testExcl)
testCases := []struct {
description string
updateErr error
@@ -479,8 +481,9 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
},
}

+ testExcl := false
for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl)

mockState := &mockState{
assignments: testCase.stAssignments,
@@ -1228,6 +1231,7 @@ func TestReconcileState(t *testing.T) {
// above test cases are without kubelet --reserved-cpus cmd option
// the following tests are with --reserved-cpus configured
func TestCPUManagerAddWithResvList(t *testing.T) {
+ testExcl := false
testPolicy, _ := NewStaticPolicy(
&topology.CPUTopology{
NumCPUs: 4,
@@ -1243,7 +1247,8 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
1,
cpuset.NewCPUSet(0),
topologymanager.NewFakeManager(),
- nil)
+ nil,
+ testExcl)
testCases := []struct {
description string
updateErr error
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index a872b389c46..09e0fc0ea0e 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -96,6 +96,8 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
+ // If true, default CPUSet should exclude reserved CPUs
+ excludeReserved bool
// topology manager reference to get container Topology affinity
affinity topologymanager.Store
// set of CPUs to reuse across allocations in a pod
@@ -110,7 +112,7 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
-func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string) (Policy, error) {
+func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, excludeReserved bool) (Policy, error) {
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
@@ -172,7 +174,15 @@ func (p *staticPolicy) validateState(s state.State) error {
}
// state is empty initialize
allCPUs := p.topology.CPUDetails.CPUs()
- s.SetDefaultCPUSet(allCPUs)
+ if p.excludeReserved {
+ // Exclude reserved CPUs from the default CPUSet to keep containers off them
+ // unless explicitly affined.
+ s.SetDefaultCPUSet(allCPUs.Difference(p.reserved))
+ } else {
+ s.SetDefaultCPUSet(allCPUs)
+ }
+ klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, default:%v\n",
+ allCPUs, p.reserved, s.GetDefaultCPUSet())
return nil
}

@@ -180,11 +190,12 @@ func (p *staticPolicy) validateState(s state.State) error {
// 1. Check if the reserved cpuset is not part of default cpuset because:
// - kube/system reserved have changed (increased) - may lead to some containers not being able to start
// - user tampered with file
- if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
- return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
- p.reserved.String(), tmpDefaultCPUset.String())
+ if !p.excludeReserved {
+ if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
+ return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
+ p.reserved.String(), tmpDefaultCPUset.String())
+ }
}
-
// 2. Check if state for static policy is consistent
for pod := range tmpAssignments {
for container, cset := range tmpAssignments[pod] {
@@ -211,6 +222,9 @@ func (p *staticPolicy) validateState(s state.State) error {
}
}
totalKnownCPUs = totalKnownCPUs.UnionAll(tmpCPUSets)
+ if p.excludeReserved {
+ totalKnownCPUs = totalKnownCPUs.Union(p.reserved)
+ }
if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) {
return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"",
p.topology.CPUDetails.CPUs().String(), totalKnownCPUs.String())
@@ -310,6 +324,9 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
cpusInUse := getAssignedCPUsOfSiblings(s, podUID, containerName)
if toRelease, ok := s.GetCPUSet(podUID, containerName); ok {
s.Delete(podUID, containerName)
+ if p.excludeReserved {
+ toRelease = toRelease.Difference(p.reserved)
+ }
// Mutate the shared pool, adding released cpus.
toRelease = toRelease.Difference(cpusInUse)
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index 4e3255fff01..edfb40d880e 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -33,6 +33,7 @@ type staticPolicyTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
+ excludeReserved bool
podUID string
options map[string]string
containerName string
@@ -64,7 +65,8 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}

func TestStaticPolicyName(t *testing.T) {
- policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ testExcl := false
+ policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl)

policyName := policy.Name()
if policyName != "static" {
@@ -94,6 +96,15 @@ func TestStaticPolicyStart(t *testing.T) {
stDefaultCPUSet: cpuset.NewCPUSet(),
expCSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
+ {
+ description: "empty cpuset exclude reserved",
+ topo: topoDualSocketHT,
+ numReservedCPUs: 2,
+ excludeReserved: true,
+ stAssignments: state.ContainerCPUAssignments{},
+ stDefaultCPUSet: cpuset.NewCPUSet(),
+ expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+ },
{
description: "reserved cores 0 & 6 are not present in available cpuset",
topo: topoDualSocketHT,
@@ -140,7 +151,8 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
- p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved)
+
policy := p.(*staticPolicy)
st := &mockState{
assignments: testCase.stAssignments,
@@ -517,7 +529,8 @@ func TestStaticPolicyAdd(t *testing.T) {
}

func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options)
+ testExcl := false
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testExcl)

st := &mockState{
assignments: testCase.stAssignments,
@@ -583,7 +596,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
}

for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved)

st := &mockState{
assignments: testCase.stAssignments,
@@ -615,6 +628,7 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
}

func TestStaticPolicyRemove(t *testing.T) {
+ excludeReserved := false
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
@@ -695,6 +709,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}

func TestTopologyAwareAllocateCPUs(t *testing.T) {
+ excludeReserved := false
testCases := []struct {
description string
topo *topology.CPUTopology
@@ -763,7 +778,7 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
- p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, excludeReserved)
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@@ -835,9 +850,11 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
+ testExcl := false
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
- p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil)
+ p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl)
+
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expNewErr, err)
@@ -877,7 +894,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0),
stAssignments: state.ContainerCPUAssignments{},
- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+ stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
@@ -889,7 +906,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
stAssignments: state.ContainerCPUAssignments{},
- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+ stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: nil,
expCPUAlloc: true,
@@ -913,8 +930,9 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
}

+ testExcl := true
for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl)

st := &mockState{
assignments: testCase.stAssignments,
--
2.25.1

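To make the intent of the excludeReserved flag easier to review, here is a small self-contained sketch (not part of the patch, and using a simplified stand-in for cpuset.CPUSet) of the default-CPUSet behaviour the policy_static.go hunk adds to validateState: when the flag is set, the reserved CPUs are removed from the shared pool so that ordinary pods never run on them.

package main

import "fmt"

// cpuSet is a minimal stand-in for cpuset.CPUSet, just enough to show the set arithmetic.
type cpuSet map[int]bool

func newCPUSet(cpus ...int) cpuSet {
	s := cpuSet{}
	for _, c := range cpus {
		s[c] = true
	}
	return s
}

// difference returns the CPUs in s that are not in other.
func (s cpuSet) difference(other cpuSet) cpuSet {
	out := cpuSet{}
	for c := range s {
		if !other[c] {
			out[c] = true
		}
	}
	return out
}

// defaultCPUSet mirrors the patched validateState logic: exclude reserved CPUs
// from the default (shared) pool when excludeReserved is true.
func defaultCPUSet(allCPUs, reserved cpuSet, excludeReserved bool) cpuSet {
	if excludeReserved {
		return allCPUs.difference(reserved)
	}
	return allCPUs
}

func main() {
	all := newCPUSet(0, 1, 2, 3, 4, 5, 6, 7)
	reserved := newCPUSet(0, 1) // e.g. CPUs named via --system-reserved=cpu or --kube-reserved=cpu
	fmt.Println(defaultCPUSet(all, reserved, true)) // prints the shared pool without CPUs 0 and 1
}

The same difference operation is applied again in RemoveContainer so that CPUs released by a terminating container are never returned to the shared pool if they are reserved.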
@ -7,3 +7,9 @@ kubeadm-create-platform-pods-with-zero-CPU-resources.patch
Revert-use-subpath-for-coredns-only-for-default-repo.patch
kubernetes-make-isolcpus-allocation-SMT-aware.patch
kubelet-sort-isolcpus-allocation-when-SMT-enabled.patch
kubelet-cpumanager-disable-CFS-quota-throttling.patch
kubelet-cpumanager-keep-normal-containers-off-reserv.patch
kubelet-cpumanager-infra-pods-use-system-reserved-CP.patch
kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch
enable-support-for-kubernetes-to-ignore-isolcpus.patch
cpumanager-policy-static-test-refactor.patch