Add open-vcsr sample app for factory install

This commit adds two files to be executed in the setup stage of
factory install, aiming to install a customized application.
These files serve as examples of how to update the setup and config
content in factory-install.

The files added are:
- 20-vcsr-setup: Script added to the setup directory, executed after the
  system setup script. It performs pre-checks, installs the sample app,
  and verifies that the pods are Running.
- open-vcsr-example.yaml: YAML manifest applied by the script above,
  located in the config directory.

Test Plan:
PASS: Run a factory-install with these changes. Verify the new script
      runs successfully and, at the end of the stage, the new
      application is running as expected.

Story: 2011455
Task: 52572

Change-Id: I921c2dc6d32771ec282328fd1121b446e8c2136b
Signed-off-by: Enzo Candotti <Enzo.Candotti@windriver.com>
This commit is contained in:
Enzo Candotti
2025-07-23 15:19:46 -03:00
parent e9828b0d72
commit a6a0784ef1
2 changed files with 435 additions and 0 deletions

View File

@@ -0,0 +1,227 @@
#
# Copyright (c) 2025 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# =========================================================
# vCSR GENERIC DEPLOYMENT TEMPLATE (Kubernetes/StarlingX)
# =========================================================
# This YAML manifest demonstrates how to deploy a Virtual Cell
# Site Router (vCSR) pod connected to multiple networks using
# SR-IOV, and VLAN interfaces in StarlingX.
#
# HOW TO CUSTOMIZE FOR YOUR ENVIRONMENT:
#
# For each NetworkAttachmentDefinition (NAD) below, you MUST review
# and update the following fields according to your network setup:
#
# - vcsr-midhaul-uplink:
# - "vlanId":
# - Must match the VLAN ID configured on the switch side.
# - **DO NOT** use the factory/enrollment VLANs from StarlingX
# - "ipam"."addresses":
# - Set IPv4 and/or IPv6 addresses according to your deployment.
# - Can be IPv4-only, IPv6-only, or dual-stack.
# - The address must belong to the switch's gateway subnet.
# - "ipam"."routes":
# - Set default gateway(s) as appropriate (IPv4 and/or IPv6).
# - If using IPv6, be sure to add the default route with a low metric
# (e.g. "metric": "1")—the CNI plugin does not do this automatically.
# This is also handled in the DaemonSet's initContainer.
#
# - vcsr-midhaul-oam-gateway and vcsr-midhaul-admin-gateway:
# - "vlanId":
# - Must match the VLAN IDs used in your subcloud enrollment.
# - "ipam"."addresses":
# - Set IPv4 and/or IPv6 addresses that fit within the selected subnet
# from enrollment.
#
# NOTE:
# Additional images are expected to be included in the image
# bundle used to generate the pre-staged ISO for this application:
#
# docker.io/busybox:latest,bootstrap
# centos/tools:latest,bootstrap
#
# If you use different images, make sure to add them to the image
# bundle as well.
#
# This template should be freely extended, reduced, or otherwise modified
# for your production deployment.
# =========================================================================
---
# This NAD connects a subcloud to the DC router via SR-IOV.
# It will be exposed inside the pod as interface net1.
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: vcsr-midhaul-vf
  namespace: vcsr
  annotations:
    # Ties this NAD to the SR-IOV device-plugin resource pool; pods must
    # request intel.com/pci_sriov_net_vcsr_midhaul to receive a VF.
    k8s.v1.cni.cncf.io/resourceName: intel.com/pci_sriov_net_vcsr_midhaul
spec:
  config: |
    {
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "sriov",
          "name": "vcsr-midhaul",
          "trust": "on",
          "spoofchk": "off"
        }
      ]
    }
---
# This NAD connects the pod to the subcloud uplink/WAN network (VLAN).
# It will be exposed inside the pod as interface net2.
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: vcsr-midhaul-uplink
  namespace: vcsr
spec:
  config: |
    {
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "vlan",
          "name": "uplink0",
          "master": "net1",
          "mtu": 1500,
          "vlanId": 22,
          "linkInContainer": true,
          "ipam": {
            "type": "static",
            "addresses": [
              { "address": "10.10.251.2/24" },
              { "address": "fdff:10:10:251:0::2/64" }
            ],
            "routes": [
              { "dst": "0.0.0.0/0", "gw": "10.10.251.1" },
              { "dst": "::/0", "gw": "fdff:10:10:251:0::1", "metric": "1" }
            ]
          }
        }
      ]
    }
---
# This NAD connects the pod to the subcloud OAM network (VLAN).
# Will appear as another interface inside the pod (e.g., net3).
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: vcsr-midhaul-oam-gateway
  namespace: vcsr
spec:
  config: |
    {
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "vlan",
          "name": "oamgw0",
          "master": "net1",
          "mtu": 1500,
          "vlanId": 5,
          "linkInContainer": true,
          "ipam": {
            "type": "static",
            "addresses": [
              { "address": "10.10.252.1/24" },
              { "address": "fdff:10:10:252:0::1/64" }
            ]
          }
        }
      ]
    }
---
# This NAD connects the pod to the subcloud ADMIN/MGMT network (VLAN).
# Will appear as another interface inside the pod (e.g., net4).
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: vcsr-midhaul-admin-gateway
  namespace: vcsr
spec:
  config: |
    {
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "vlan",
          "name": "admingw0",
          "master": "net1",
          "mtu": 1500,
          "vlanId": 6,
          "linkInContainer": true,
          "ipam": {
            "type": "static",
            "addresses": [
              { "address": "10.10.253.1/24" },
              { "address": "fdff:10:10:253:0::1/64" }
            ]
          }
        }
      ]
    }
---
#############################################################
# DaemonSet to deploy the vCSR pod on every node.
# You can replace DaemonSet with Deployment/StatefulSet as needed.
#############################################################
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: open-vcsr
  name: open-vcsr
  namespace: vcsr
spec:
  selector:
    matchLabels:
      app: open-vcsr
  template:
    metadata:
      labels:
        app: open-vcsr
        restart-on-reboot: "true"
      annotations:
        # This annotation lists all the NADs attached to the pod.
        # The order matches net1, net2, net3, ... inside the container.
        k8s.v1.cni.cncf.io/networks: '[
          {"name":"vcsr-midhaul-vf"},
          {"name":"vcsr-midhaul-uplink"},
          {"name":"vcsr-midhaul-oam-gateway"},
          {"name":"vcsr-midhaul-admin-gateway"}
          ]'
    spec:
      containers:
      - image: centos/tools
        imagePullPolicy: IfNotPresent
        name: open-vcsr
        # Keep the container alive indefinitely; routing is done by the
        # kernel via the attached interfaces, not by this process.
        command: [ "/bin/bash", "-c", "--" ]
        args: [ "while true; do sleep 300000; done;" ]
        securityContext:
          privileged: true
        resources:
          requests:
            intel.com/pci_sriov_net_vcsr_midhaul: '1' # Requests one SR-IOV VF
          limits:
            intel.com/pci_sriov_net_vcsr_midhaul: '1'
      initContainers:
      - name: sysctl-init
        image: busybox:latest
        command: [ "/bin/sh", "-c" ]
        securityContext:
          privileged: true
        args:
        - |
          # Enable forwarding in the node for L3 routing (IPv4 and IPv6)
          sysctl -w net.ipv6.conf.all.forwarding=1
          sysctl -w net.ipv4.ip_forward=1
          # Add IPv6 default route with low metric (if required for your config)
          ip route add default via fdff:10:10:251:0::1 metric 1
          # Add any extra routes here if needed

View File

@@ -0,0 +1,208 @@
#!/bin/bash
#
# Copyright (c) 2025 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ====================================================================================
# vCSR Sample Setup Script - Generic Deployment Example for a Virtual Cell Site Router
# ====================================================================================
#
# This script demonstrates how to deploy a customized vCSR (Virtual Cell Site Router)
# solution using Kubernetes on StarlingX.
#
# It is meant as an example: adapt it according to the tarball
# or YAML file name, the network names, and any specific deployment logic.
#
# The accompanying YAML manifest can include NetworkAttachmentDefinitions (NADs),
# pod/DaemonSet definitions, and additional configuration as needed.
#
# Main steps:
# 1. Check that the required networks exist in StarlingX.
# 2. Create the target Kubernetes namespace if not present.
# 3. Copy the default registry secret from kube-system to the new namespace
# 4. Apply the YAML manifest with all NADs and pods.
# 5. Wait for pods to reach Ready state.
#
# NOTE:
# Replace this file with your own logic to install any additional applications
# required for your deployment.
#
# Delete this file if vCSR or any other custom application is not expected to
# be installed during factory install.
# ====================================================================================
#
# If you wish to perform additional reconfiguration during subcloud enrollment,
# you must create a tarball with the following directory structure:
#
# cloud-init-config/
# └── scripts/
# ├── 20-vCSR-reconfig
# └── additional-vCSR-config-file-or-dir
#
# - The reconfiguration script should be placed under the "scripts" directory.
# - The script name must have a numeric prefix greater than 10, as the
# "10-platform-reconfig" script is reserved for StarlingX software reconfiguration.
#
# During enrollment, specify this tarball file using the --cloud-init-config option.
#
# Example:
# dcmanager subcloud add --enroll --cloud-init-config <your-tarball>.tar.gz ...
# ====================================================================================
# --- Configuration ----------------------------------------------------------
HOME_DIR="/home/sysadmin"                 # Directory to look for manifests/files
KUBE_CONFIG="/etc/kubernetes/admin.conf"  # Path to Kubernetes config
VCSR_PREFIX="open-vcsr"                   # Prefix for the YAML manifest name
VCSR_NAMESPACE="vcsr"                     # Namespace where vCSR will be deployed
# Automatically finds the first YAML matching the prefix (update the logic if
# needed). 'head -n 1' pins the result to a single path: without it, 'find'
# may return several newline-separated files, which would later break the
# 'kubectl apply -f' call that consumes this variable.
VCSR_YAML_FILE_PATH=$(
    find "${HOME_DIR}" -maxdepth 1 -type f -name "${VCSR_PREFIX}*.yaml" 2>/dev/null \
        | head -n 1
)
# Logging functions
# Abort the script (via log_fatal) when a previous command failed.
# Arguments:
#   $1 - integer return code to check
#   $2 - message to log when the return code is non-zero
check_rc_die() {
    local -i rc=${1}
    # 'local' added: the original left 'msg' global, leaking it into the
    # caller's scope on every invocation.
    local msg=${2}
    if [ "${rc}" -ne 0 ]; then
        log_fatal "${msg} [rc=${rc}]"
    fi
}
# Log a timestamped FATAL message to stdout and terminate the script
# with exit status 1.
log_fatal() {
    printf '%s FATAL: %s\n' \
        "$(date +"%Y-%m-%d %H:%M:%S,%3N - vCSR-setup -")" "${*}"
    exit 1
}
# Log a timestamped WARN message to stdout.
log_warn() {
    printf '%s WARN: %s\n' \
        "$(date +"%Y-%m-%d %H:%M:%S,%3N - vCSR-setup -")" "${*}"
}
# Log a timestamped INFO message to stdout.
log_info() {
    printf '%s INFO: %s\n' \
        "$(date +"%Y-%m-%d %H:%M:%S,%3N - vCSR-setup -")" "${*}"
}
# Load platform environment variables (required for the StarlingX 'system'
# CLI used by check_required_networks below).
if [ -f /etc/platform/openrc ]; then
    source /etc/platform/openrc
else
    log_fatal "Platform environment file /etc/platform/openrc not found"
fi
# Fail early if the Kubernetes admin config is missing; every kubectl call
# in this script depends on it.
if [ ! -f "${KUBE_CONFIG}" ]; then
    log_fatal "Kubernetes configuration file ${KUBE_CONFIG} not found."
fi
# Export kubeconfig so kubectl picks up the admin credentials.
export KUBECONFIG=${KUBE_CONFIG}
# Waits for all pods in the namespace to reach Ready
# Block until every pod in ${VCSR_NAMESPACE} reports condition=Ready, or die
# (via log_fatal) after the timeout. Relies on KUBECONFIG exported above.
wait_for_pod_ready() {
    local -i timeout_seconds=300
    local -i rc
    log_info "Waiting for vCSR pods in namespace ${VCSR_NAMESPACE} to become Ready..."
    kubectl -n "${VCSR_NAMESPACE}" wait pod \
        --for=condition=Ready \
        --all \
        --timeout="${timeout_seconds}s"
    rc=$?
    if [ ${rc} -ne 0 ]; then
        # Single-line message: the original line continuation inside double
        # quotes embedded the continuation whitespace into the logged text.
        log_fatal "Timeout: Not all vCSR pods became Ready within ${timeout_seconds} seconds."
    fi
    log_info "All vCSR pods are Ready."
}
# Checks that the required network exists; adjust the network name if needed
# Verify that the StarlingX data network required by the vCSR NADs exists.
# Dies via log_fatal when no data networks, or the expected one, are found.
check_required_networks() {
    local output
    output=$(system datanetwork-list)
    log_info "Checking for required data network 'vcsr-midhaul' (adjust as needed)..."
    if [ -z "$output" ]; then
        # Single-line message: the original line continuation inside double
        # quotes embedded the continuation whitespace into the logged text.
        log_fatal "No data networks configured on the system. Please ensure the networks are created before running this script."
    fi
    # MODIFY this network name if needed
    if echo "$output" | grep -q "vcsr-midhaul"; then
        log_info "vcsr-midhaul network is present."
    else
        log_fatal "Required network 'vcsr-midhaul' is missing."
    fi
}
# Copies the default registry secret from kube-system namespace to vCSR namespace
# Copy the default registry pull secret from kube-system into the vCSR
# namespace so the pods deployed there can pull images from registry.local.
# Creates the destination namespace first if needed (via ensure_namespace).
copy_registry_secret() {
    local SECRET_NAME="default-registry-key"
    local SRC_NS="kube-system"
    local DEST_NS="${VCSR_NAMESPACE}"
    local manifest
    ensure_namespace
    log_info "Copying secret '$SECRET_NAME' from '$SRC_NS' to '$DEST_NS'..."
    # Check that the secret exists in the source namespace
    if ! kubectl get secret "$SECRET_NAME" -n "$SRC_NS" >/dev/null 2>&1; then
        log_fatal "Secret '$SECRET_NAME' not found in namespace '$SRC_NS'."
    fi
    # Export the secret and point it at the destination namespace. Capture
    # the manifest first instead of one long pipeline: without 'set -o
    # pipefail', '$?' after a get|sed|apply pipeline only reflects 'kubectl
    # apply', silently masking export failures.
    manifest=$(kubectl get secret "$SECRET_NAME" -n "$SRC_NS" -o yaml | \
        sed "s/namespace: $SRC_NS/namespace: $DEST_NS/")
    check_rc_die $? "Failed to export secret '$SECRET_NAME' from '$SRC_NS'."
    echo "$manifest" | kubectl apply -f -
    check_rc_die $? "Failed to copy secret '$SECRET_NAME' to '$DEST_NS'."
    log_info "Secret '$SECRET_NAME' successfully copied to '$DEST_NS'."
}
# Creates the namespace in Kubernetes if it does not exist
# Make sure the target Kubernetes namespace exists; create it when absent.
# Safe to call repeatedly (idempotent).
ensure_namespace() {
    if kubectl get namespace "${VCSR_NAMESPACE}" >/dev/null 2>&1; then
        log_info "Namespace ${VCSR_NAMESPACE} already exists."
        return 0
    fi
    log_info "Namespace ${VCSR_NAMESPACE} does not exist. Creating it..."
    kubectl create namespace "${VCSR_NAMESPACE}"
    check_rc_die $? "Failed to create namespace ${VCSR_NAMESPACE}"
}
# Applies the YAML manifest
# Apply the vCSR YAML manifest found in HOME_DIR and wait for the resulting
# pods to reach the Ready state. Dies via log_fatal on any failure.
install_vcsr() {
    log_info "Installing vCSR application..."
    check_required_networks
    if [ -z "$VCSR_YAML_FILE_PATH" ]; then
        log_fatal "No vCSR YAML file found in ${HOME_DIR} matching ${VCSR_PREFIX}*.yaml"
    fi
    log_info "Applying manifest: ${VCSR_YAML_FILE_PATH}"
    # Quote both expansions: unquoted, the path would word-split if it
    # contained whitespace (or if 'find' ever matched more than one file).
    kubectl -n "${VCSR_NAMESPACE}" apply -f "${VCSR_YAML_FILE_PATH}"
    check_rc_die $? "Failed to apply vCSR yaml manifest."
    # Wait for all pods to be ready
    wait_for_pod_ready
}
log_info "Starting to apply the open vCSR application..."
# Ensure that the required networks are present
# NOTE(review): install_vcsr re-runs this same check; the early call here
# fails fast before the registry secret is copied.
check_required_networks
# Copy the default registry secret to ensure that vCSR can pull images
# from the registry.local
copy_registry_secret
install_vcsr
log_info "Open vCSR application deployment completed successfully"
exit 0