79db1f3eed
Changes for adding Kubernetes 1.23.1 in StarlingX, including build
environment updates. The package builds successfully. Built and
installed an ISO with K8s 1.23.1 on AIO-SX.

Depends-On: https://review.opendev.org/c/starlingx/compile/+/825651
Story: 2009830
Task: 44424
Change-Id: I3e2b793d7b88057fc597b2445bddd137bb2b4fcf
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
From f2186adb445f1420184aa5af5536bb777536a798 Mon Sep 17 00:00:00 2001
From: Gleb Aronsky <gleb.aronsky@windriver.com>
Date: Mon, 24 Jan 2022 15:52:24 -0500
Subject: [PATCH] kubelet cpumanager keep normal containers off reserved CPUs

When starting the kubelet process, two separate sets of reserved CPUs
may be specified. With this change, CPUs reserved via
'--system-reserved=cpu' or '--kube-reserved=cpu' will be ignored by
Kubernetes itself. A small tweak to the default CPU affinity ensures
that "normal" Kubernetes pods won't run on the reserved CPUs.

Co-authored-by: Jim Gauld <james.gauld@windriver.com>
Signed-off-by: Gleb Aronsky <gleb.aronsky@windriver.com>
---
 pkg/kubelet/cm/cpumanager/cpu_manager.go      |  6 ++-
 pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 11 ++++--
 pkg/kubelet/cm/cpumanager/policy_static.go    | 29 +++++++++++---
 .../cm/cpumanager/policy_static_test.go       | 38 ++++++++++++++-----
 4 files changed, 64 insertions(+), 20 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index 6ad289336ea..8336e7b0fd4 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -191,7 +191,11 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
// exclusively allocated.
reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000
numReservedCPUs := int(math.Ceil(reservedCPUsFloat))
- policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions)
+ // NOTE: Set excludeReserved unconditionally to exclude reserved CPUs from default cpuset.
+ // This variable is primarily to make testing easier.
+ excludeReserved := true
+ policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions, excludeReserved)
+
if err != nil {
return nil, fmt.Errorf("new static policy error: %w", err)
}
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index 9b3e24fc3b2..2c8349662c4 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -215,6 +215,7 @@ func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string })
}

func TestCPUManagerAdd(t *testing.T) {
+ testExcl := false
testPolicy, _ := NewStaticPolicy(
&topology.CPUTopology{
NumCPUs: 4,
@@ -230,7 +231,8 @@ func TestCPUManagerAdd(t *testing.T) {
0,
cpuset.NewCPUSet(),
topologymanager.NewFakeManager(),
- nil)
+ nil,
+ testExcl)
testCases := []struct {
description string
updateErr error
@@ -479,8 +481,9 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
},
}

+ testExcl := false
for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl)

mockState := &mockState{
assignments: testCase.stAssignments,
@@ -1228,6 +1231,7 @@ func TestReconcileState(t *testing.T) {
// above test cases are without kubelet --reserved-cpus cmd option
// the following tests are with --reserved-cpus configured
func TestCPUManagerAddWithResvList(t *testing.T) {
+ testExcl := false
testPolicy, _ := NewStaticPolicy(
&topology.CPUTopology{
NumCPUs: 4,
@@ -1243,7 +1247,8 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
1,
cpuset.NewCPUSet(0),
topologymanager.NewFakeManager(),
- nil)
+ nil,
+ testExcl)
testCases := []struct {
description string
updateErr error
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index f7ff26cd313..9697f4d4bb0 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -96,6 +96,8 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
+ // If true, default CPUSet should exclude reserved CPUs
+ excludeReserved bool
// topology manager reference to get container Topology affinity
affinity topologymanager.Store
// set of CPUs to reuse across allocations in a pod
@@ -110,7 +112,7 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
-func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string) (Policy, error) {
+func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, excludeReserved bool) (Policy, error) {
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
@@ -172,7 +174,15 @@ func (p *staticPolicy) validateState(s state.State) error {
}
// state is empty initialize
allCPUs := p.topology.CPUDetails.CPUs()
- s.SetDefaultCPUSet(allCPUs)
+ if p.excludeReserved {
+ // Exclude reserved CPUs from the default CPUSet to keep containers off them
+ // unless explicitly affined.
+ s.SetDefaultCPUSet(allCPUs.Difference(p.reserved))
+ } else {
+ s.SetDefaultCPUSet(allCPUs)
+ }
+ klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, default:%v\n",
+ allCPUs, p.reserved, s.GetDefaultCPUSet())
return nil
}

@@ -180,11 +190,12 @@ func (p *staticPolicy) validateState(s state.State) error {
// 1. Check if the reserved cpuset is not part of default cpuset because:
// - kube/system reserved have changed (increased) - may lead to some containers not being able to start
// - user tampered with file
- if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
- return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
- p.reserved.String(), tmpDefaultCPUset.String())
+ if !p.excludeReserved {
+ if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
+ return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
+ p.reserved.String(), tmpDefaultCPUset.String())
+ }
}
-
// 2. Check if state for static policy is consistent
for pod := range tmpAssignments {
for container, cset := range tmpAssignments[pod] {
@@ -211,6 +222,9 @@ func (p *staticPolicy) validateState(s state.State) error {
}
}
totalKnownCPUs = totalKnownCPUs.UnionAll(tmpCPUSets)
+ if p.excludeReserved {
+ totalKnownCPUs = totalKnownCPUs.Union(p.reserved)
+ }
if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) {
return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"",
p.topology.CPUDetails.CPUs().String(), totalKnownCPUs.String())
@@ -296,6 +310,9 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
klog.InfoS("Static policy: RemoveContainer", "podUID", podUID, "containerName", containerName)
if toRelease, ok := s.GetCPUSet(podUID, containerName); ok {
s.Delete(podUID, containerName)
+ if p.excludeReserved {
+ toRelease = toRelease.Difference(p.reserved)
+ }
// Mutate the shared pool, adding released cpus.
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
}
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index d2b641fe3a0..80bd04a1f92 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -33,6 +33,7 @@ type staticPolicyTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
+ excludeReserved bool
podUID string
options map[string]string
containerName string
@@ -64,7 +65,8 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}

func TestStaticPolicyName(t *testing.T) {
- policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ testExcl := false
+ policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl)

policyName := policy.Name()
if policyName != "static" {
@@ -94,6 +96,15 @@ func TestStaticPolicyStart(t *testing.T) {
stDefaultCPUSet: cpuset.NewCPUSet(),
expCSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
+ {
+ description: "empty cpuset exclude reserved",
+ topo: topoDualSocketHT,
+ numReservedCPUs: 2,
+ excludeReserved: true,
+ stAssignments: state.ContainerCPUAssignments{},
+ stDefaultCPUSet: cpuset.NewCPUSet(),
+ expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+ },
{
description: "reserved cores 0 & 6 are not present in available cpuset",
topo: topoDualSocketHT,
@@ -140,7 +151,8 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
- p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved)
+
policy := p.(*staticPolicy)
st := &mockState{
assignments: testCase.stAssignments,
@@ -211,7 +223,7 @@ func TestStaticPolicyAdd(t *testing.T) {
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
},
},
- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
+ stDefaultCPUSet: cpuset.NewCPUSet(4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
expErr: nil,
expCPUAlloc: true,
@@ -517,7 +529,8 @@ func TestStaticPolicyAdd(t *testing.T) {
}

func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options)
+ testExcl := false
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testExcl)

st := &mockState{
assignments: testCase.stAssignments,
@@ -559,6 +572,7 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
}

func TestStaticPolicyRemove(t *testing.T) {
+ excludeReserved := false
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
@@ -617,7 +631,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}

for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved)

st := &mockState{
assignments: testCase.stAssignments,
@@ -639,6 +653,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}

func TestTopologyAwareAllocateCPUs(t *testing.T) {
+ excludeReserved := false
testCases := []struct {
description string
topo *topology.CPUTopology
@@ -707,7 +722,7 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
- p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+ p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, excludeReserved)
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@@ -779,9 +794,11 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
},
}
+ testExcl := false
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
- p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil)
+ p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl)
+
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expNewErr, err)
@@ -821,7 +838,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0),
stAssignments: state.ContainerCPUAssignments{},
- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+ stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
@@ -833,7 +850,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
stAssignments: state.ContainerCPUAssignments{},
- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+ stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: nil,
expCPUAlloc: true,
@@ -857,8 +874,9 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
},
}

+ testExcl := true
for _, testCase := range testCases {
- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil)
+ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl)

st := &mockState{
assignments: testCase.stAssignments,
--
2.25.1
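For context, the net effect of the patch is that the static policy's default cpuset becomes "all CPUs minus the reserved set" rather than "all CPUs", so containers without exclusive CPU assignments never land on CPUs reserved via '--system-reserved=cpu' or '--kube-reserved=cpu'. The following is a minimal, self-contained Go sketch of that set arithmetic; it is not part of the patch, and it uses a plain map-backed set as a stand-in for the kubelet's cpuset package, mirroring the "empty cpuset exclude reserved" test case above (12 CPUs, CPUs 0 and 6 reserved).

// Illustrative sketch only: models the default-cpuset computation that the
// patched validateState performs when excludeReserved is true.
package main

import (
	"fmt"
	"sort"
)

// cpuSet is a stand-in for the kubelet's cpuset.CPUSet type.
type cpuSet map[int]struct{}

func newCPUSet(cpus ...int) cpuSet {
	s := cpuSet{}
	for _, c := range cpus {
		s[c] = struct{}{}
	}
	return s
}

// difference returns the CPUs in s that are not in other.
func (s cpuSet) difference(other cpuSet) cpuSet {
	out := cpuSet{}
	for c := range s {
		if _, ok := other[c]; !ok {
			out[c] = struct{}{}
		}
	}
	return out
}

// sorted returns the members in ascending order, for stable printing.
func (s cpuSet) sorted() []int {
	out := make([]int, 0, len(s))
	for c := range s {
		out = append(out, c)
	}
	sort.Ints(out)
	return out
}

func main() {
	allCPUs := newCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
	reserved := newCPUSet(0, 6) // e.g. one core per socket kept for platform use

	// Pre-patch behaviour: the default cpuset handed to "normal" pods is allCPUs.
	// Post-patch (excludeReserved == true): reserved CPUs are carved out, so
	// non-exclusive containers never run on them.
	defaultSet := allCPUs.difference(reserved)
	fmt.Println("default cpuset:", defaultSet.sorted()) // [1 2 3 4 5 7 8 9 10 11]
}

The same subtraction appears in RemoveContainer above (toRelease.Difference(p.reserved)), so CPUs released by a deleted container are returned to the shared pool without re-adding the reserved ones.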