TCPG-2705 automation pt 2

add remaining compute node-affinity scenario for TCPG-2705

Change-Id: I626de235654acac3a01ea51788e57b9a6d6ddc5a
Signed-off-by: Gabriel Calixto de Paula <gabrielcalixto9@gmail.com>
This commit is contained in:
Gabriel Calixto de Paula
2025-05-12 11:59:23 -04:00
parent 2709f067b5
commit c2f7f16679
3 changed files with 98 additions and 13 deletions

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Namespace
metadata:
  name: test-node-affinity-namespace
---
apiVersion: v1
kind: Pod
metadata:
  name: node-affinity-to-compute-pod
  namespace: test-node-affinity-namespace
  labels:
    app: node-affinity-example
spec:
  containers:
    - name: nginx
      image: nginx:latest
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: "node-role.kubernetes.io/control-plane"
                operator: DoesNotExist

View File

@@ -1,7 +1,14 @@
apiVersion: v1
kind: Namespace
metadata:
  name: test-node-affinity-namespace
---
apiVersion: v1
kind: Pod
metadata:
  name: node-affinity-to-controller-pod
  namespace: test-node-affinity-namespace
  labels:
    app: node-affinity-example
spec:

View File

@@ -1,4 +1,4 @@
from pytest import FixtureRequest
from pytest import FixtureRequest, mark
from framework.logging.automation_logger import get_logger
from framework.resources.resource_finder import get_stx_resource_path
@@ -6,6 +6,7 @@ from framework.ssh.ssh_connection import SSHConnection
from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
from keywords.files.file_keywords import FileKeywords
from keywords.k8s.files.kubectl_file_delete_keywords import KubectlFileDeleteKeywords
from keywords.k8s.node.kubectl_describe_node_keywords import KubectlDescribeNodeKeywords
from keywords.k8s.pods.kubectl_apply_pods_keywords import KubectlApplyPodsKeywords
from keywords.k8s.pods.kubectl_get_pods_keywords import KubectlGetPodsKeywords
@@ -19,7 +20,7 @@ def copy_affinity_files(request: FixtureRequest, ssh_connection: SSHConnection):
ssh_connection (SSHConnection): ssh connection object
"""
node_affinity_dir = "node_affinity"
dashboard_file_names = ["pod_affinity_controller.yaml"]
dashboard_file_names = ["pod_affinity_controller.yaml", "pod_affinity_compute.yaml"]
get_logger().log_info("Creating pod_affinity directory")
FileKeywords(ssh_connection).create_directory(f"/home/sysadmin/{node_affinity_dir}")
for dashboard_file_name in dashboard_file_names:
@@ -35,7 +36,7 @@ def copy_affinity_files(request: FixtureRequest, ssh_connection: SSHConnection):
def test_node_affinity_controller(request: FixtureRequest):
    """
    Test the functionality of the node affinity to a controller by applying
    the necessary Kubernetes YAML configuration and verifying the behavior.

    Test Steps:
        Step 1: Transfer the pod affinity files to the active controller (setup)
            - Copy test files from local to the SystemController.
            - Check the copies on the SystemController.
        Step 2: Apply the pod affinity YAML file using kubectl.
        Step 3: Verify the pod affinity behavior(affinity to controller) by checking the status of the pods.
        Step 4: Clean up the test environment by deleting the created resources (teardown).

    Args:
        request (FixtureRequest): pytest fixture for managing test setup and teardown
    """
    # Step 1: Transfer the dashboard files to the active controller.
    get_logger().log_test_case_step("Step 1: Transfer the dashboard files to the active controller")
    pod_name = "node-affinity-to-controller-pod"
    namespace = "test-node-affinity-namespace"
    ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
    copy_affinity_files(request, ssh_connection)
    get_logger().log_info("Running test_node_affinity_controller")

    def teardown():
        # Step 4: Clean up the test environment by deleting the created resources (teardown).
        get_logger().log_test_case_step("Step 4: Clean up the test environment by deleting the created resources (teardown).")
        KubectlFileDeleteKeywords(ssh_connection).delete_resources("/home/sysadmin/node_affinity/pod_affinity_controller.yaml")

    # Register teardown before applying, so resources are cleaned up even if the apply fails.
    request.addfinalizer(teardown)

    get_logger().log_test_case_step("Step 2: Apply the pod affinity YAML file using kubectl.")
    KubectlApplyPodsKeywords(ssh_connection).apply_from_yaml("/home/sysadmin/node_affinity/pod_affinity_controller.yaml")
    KubectlGetPodsKeywords(ssh_connection).wait_for_pod_status(
        pod_name=pod_name,
        namespace=namespace,
        expected_status="Running",
        timeout=60,
    )
    get_logger().log_info("Pod (node-affinity-to-controller-pod) is running")

    get_logger().log_test_case_step("Step 3: Verify the pod affinity " "behavior(affinity to controller) by checking the status of the pods")
    pods = KubectlGetPodsKeywords(ssh_connection).get_pods(namespace=namespace)
    node_affinity_pod = pods.get_pod(pod_name)
    assert node_affinity_pod is not None, f"Pod {pod_name} not found"
    get_logger().log_info(f"Pod {node_affinity_pod.get_name()} is running on node {node_affinity_pod.get_node()}")
    pod_node_role = KubectlDescribeNodeKeywords(ssh_connection).describe_node(node_affinity_pod.get_node()).get_node_description().get_roles()
    # check if host has control-plane role, if so, the node is a controller
    assert "control-plane" in pod_node_role, f"Pod {node_affinity_pod.get_name()} is not running on controller host"
@mark.lab_has_compute
def test_node_affinity_compute(request: FixtureRequest):
    """
    Test the functionality of the node affinity to a compute by applying
    the necessary Kubernetes YAML configuration and verifying the behavior.

    Test Steps:
        Step 1: Transfer the pod affinity files to the active controller (setup)
            - Copy test files from local to the SystemController.
            - Check the copies on the SystemController.
        Step 2: Apply the pod affinity YAML file using kubectl.
        Step 3: Verify the pod affinity behavior(affinity to compute) by checking the status of the pods.
        Step 4: Clean up the test environment by deleting the created resources (teardown).

    Args:
        request (FixtureRequest): pytest fixture for managing test setup and teardown
    """
    # Step 1: Transfer the dashboard files to the active controller.
    get_logger().log_test_case_step("Step 1: Transfer the dashboard files to the active controller")
    pod_name = "node-affinity-to-compute-pod"
    namespace = "test-node-affinity-namespace"
    ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
    copy_affinity_files(request, ssh_connection)
    get_logger().log_info("Running test_node_affinity_compute")

    def teardown():
        # Step 4: Clean up the test environment by deleting the created resources (teardown).
        get_logger().log_test_case_step("Step 4: Clean up the test environment by deleting the created resources (teardown).")
        KubectlFileDeleteKeywords(ssh_connection).delete_resources("/home/sysadmin/node_affinity/pod_affinity_compute.yaml")

    # Register teardown before applying, so resources are cleaned up even if the apply fails.
    request.addfinalizer(teardown)

    get_logger().log_test_case_step("Step 2: Apply the pod affinity YAML file using kubectl.")
    KubectlApplyPodsKeywords(ssh_connection).apply_from_yaml("/home/sysadmin/node_affinity/pod_affinity_compute.yaml")
    KubectlGetPodsKeywords(ssh_connection).wait_for_pod_status(
        pod_name=pod_name,
        namespace=namespace,
        expected_status="Running",
        timeout=60,
    )
    get_logger().log_info("Pod (node-affinity-to-compute-pod) is running")

    get_logger().log_test_case_step("Step 3: Verify the pod affinity " "behavior(affinity to compute) by checking the status of the pods")
    pods = KubectlGetPodsKeywords(ssh_connection).get_pods(namespace=namespace)
    node_affinity_pod = pods.get_pod(pod_name)
    assert node_affinity_pod is not None, f"Pod {pod_name} not found"
    get_logger().log_info(f"Pod {node_affinity_pod.get_name()} is running on node {node_affinity_pod.get_node()}")
    pod_node_role = KubectlDescribeNodeKeywords(ssh_connection).describe_node(node_affinity_pod.get_node()).get_node_description().get_roles()
    # The pod must NOT land on a control-plane node; a node without that role is a compute host.
    # BUG FIX: the original failure message read "is not running on controller host", which is
    # the opposite of what this assertion checks — it fires when the pod IS on a controller.
    assert "control-plane" not in pod_node_role, f"Pod {node_affinity_pod.get_name()} is running on a controller host, expected a compute host"