Repair validate-site job

This job has not worked properly since a number of changes were made to the
airshipctl logic. All of those issues are addressed in this change.

Change-Id: Iec6fa7e6a3aa1ab46d496a8fd63822df1f8124cc
Signed-off-by: Ruslan Aliev <raliev@mirantis.com>
Relates-To: #19
This commit is contained in:
Ruslan Aliev 2021-01-31 14:52:43 -06:00
parent 7bf93ba699
commit 54256dbc70
10 changed files with 73 additions and 54 deletions

View File

@ -2,8 +2,8 @@
apiVersion: v1 apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: manager-bootstrap-credentials name: capg-manager-bootstrap-credentials
namespace: system namespace: capg-system
type: Opaque type: Opaque
data: data:
GCP_CONTROL_PLANE_MACHINE_TYPE: ${GCP_CONTROL_PLANE_MACHINE_TYPE} GCP_CONTROL_PLANE_MACHINE_TYPE: ${GCP_CONTROL_PLANE_MACHINE_TYPE}

View File

@ -1,3 +1,4 @@
resources: resources:
- metadata.yaml - metadata.yaml
patchesStrategicMerge:
- capg-resources.yaml - capg-resources.yaml

View File

@ -7,7 +7,8 @@ bases:
- crd - crd
- webhook # Disable this if you're not using the webhook functionality. - webhook # Disable this if you're not using the webhook functionality.
- default - default
- data # needs to be disabled since env variables aren't imported properly
#- data
# Enable this when conversion webhooks are implemented # Enable this when conversion webhooks are implemented
#patchesJson6902: #patchesJson6902:

View File

@ -8,6 +8,7 @@ phases:
- name: initinfra-networking-ephemeral - name: initinfra-networking-ephemeral
- name: clusterctl-init-ephemeral - name: clusterctl-init-ephemeral
- name: controlplane-ephemeral - name: controlplane-ephemeral
- name: clusterctl-init-target
- name: initinfra-target - name: initinfra-target
- name: initinfra-networking-target - name: initinfra-networking-target
- name: workers-target - name: workers-target

View File

@ -15,6 +15,18 @@
- vars/test-config.yaml - vars/test-config.yaml
environment: environment:
SOPS_IMPORT_PGP: "{{ airship_config_pgp }}" SOPS_IMPORT_PGP: "{{ airship_config_pgp }}"
SOPS_PGP_FP: "FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4"
AZURE_SUBSCRIPTION_ID_B64: "UGxlYXNlLCBwcm92aWRlIHlvdXIgQXp1cmUgc3Vic2NyaXB0aW9uIGlkIGhlcmUK"
AZURE_TENANT_ID_B64: "UGxlYXNlLCBwcm92aWRlIHlvdXIgQXp1cmUgdGVuYW50IGlkIGhlcmUK"
AZURE_CLIENT_ID_B64: "UGxlYXNlLCBwcm92aWRlIHlvdXIgQXp1cmUgc2VydmljZSBwcmluY2lwYWwgaWQgaGVyZQo="
AZURE_CLIENT_SECRET_B64: "UGxlYXNlLCBwcm92aWRlIHlvdXIgQXp1cmUgc2VydmljZSBwcmluY2lwYWwgc2VjcmV0IGhlcmUK"
AZURE_ENVIRONMENT: "AzurePublicCloud"
GCP_CONTROL_PLANE_MACHINE_TYPE: "bjEtc3RhbmRhcmQtNA=="
GCP_NODE_MACHINE_TYPE: "bjEtc3RhbmRhcmQtNA=="
GCP_PROJECT: "bjEtc3RhbmRhcmQtNA=="
GCP_REGION: "dXMtd2VzdDE="
GCP_NETWORK_NAME: "ZGVmYXVsdA=="
GCP_B64ENCODED_CREDENTIALS: "bjEtc3RhbmRhcmQtNA=="
tasks: tasks:
- name: "set default gate scripts" - name: "set default gate scripts"
set_fact: set_fact:

View File

@ -15,7 +15,7 @@
# This downloads kind, puts it in a temp directory, and prints the directory # This downloads kind, puts it in a temp directory, and prints the directory
set -e set -e
: ${KIND_URL:="https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64"} : ${KIND_URL:="https://kind.sigs.k8s.io/dl/v0.10.0/kind-$(uname)-amd64"}
TMP=$(mktemp -d) TMP=$(mktemp -d)
KIND="${TMP}/kind" KIND="${TMP}/kind"

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
set -e set -xe
# This starts up a kubernetes cluster which is sufficient for # This starts up a kubernetes cluster which is sufficient for
# assisting with tasks like `kubectl apply --dry-run` style validation # assisting with tasks like `kubectl apply --dry-run` style validation
# Usage # Usage

View File

@ -16,14 +16,14 @@ set -xe
# The root of the manifest structure to be validated. # The root of the manifest structure to be validated.
# This corresponds to the targetPath in an airshipctl config # This corresponds to the targetPath in an airshipctl config
: ${MANIFEST_ROOT:="$(dirname "${PWD}")"} : ${MANIFEST_ROOT:="$(basename "${PWD}")/manifests"}
# The location of sites whose manifests should be validated. # The location of sites whose manifests should be validated.
# This are relative to MANIFEST_ROOT above # This are relative to MANIFEST_ROOT above
: ${SITE_ROOT:="$(basename "${PWD}")/manifests/site"} : ${SITE_ROOT:="$(basename "${PWD}")/manifests/site"}
: ${SITE:="test-workload"} : ${SITE:="test-workload"}
: ${CONTEXT:="kind-airship"} : ${CONTEXT:="kind-airship"}
: ${KUBECONFIG:="${HOME}/.airship/kubeconfig"} : ${AIRSHIPKUBECONFIG:="${HOME}/.airship/kubeconfig"}
: ${KUBECTL:="/usr/local/bin/kubectl"} : ${KUBECTL:="/usr/local/bin/kubectl"}
TMP=$(mktemp -d) TMP=$(mktemp -d)
@ -37,16 +37,16 @@ else
fi fi
: ${AIRSHIPCONFIG:="${TMP}/config"} : ${AIRSHIPCONFIG:="${TMP}/config"}
: ${AIRSHIPKUBECONFIG:="${TMP}/kubeconfig"} : ${KUBECONFIG:="${TMP}/kubeconfig"}
: ${AIRSHIPCTL:="${AIRSHIPCTL_DEFAULT}"} : ${AIRSHIPCTL:="${AIRSHIPCTL_DEFAULT}"}
ACTL="${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG} --kubeconfig ${AIRSHIPKUBECONFIG}" ACTL="${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG} --kubeconfig ${KUBECONFIG}"
export KUBECONFIG export KUBECONFIG
# TODO: use `airshipctl config` to do this once all the needed knobs are exposed # TODO: use `airshipctl config` to do this once all the needed knobs are exposed
# The non-default parts are to set the targetPath appropriately, # The non-default parts are to set the targetPath appropriately,
# and to craft up cluster/contexts to avoid the need for automatic kubectl reconciliation # and to craft up cluster/contexts to avoid the need for automatic kubectl reconciliation
function generate_airshipconf { function generate_airshipconf() {
cluster=$1 cluster=$1
cat <<EOL >${AIRSHIPCONFIG} cat <<EOL >${AIRSHIPCONFIG}
@ -74,55 +74,59 @@ manifests:
commitHash: "" commitHash: ""
force: false force: false
tag: "" tag: ""
url: https://opendev.org/airship/treasuremap url: https://review.opendev.org/airship/airshipctl
targetPath: ${MANIFEST_ROOT} targetPath: ${MANIFEST_ROOT}
metadataPath: manifests/site/${SITE}/metadata.yaml
EOL EOL
} }
function cleanup() { function cleanup() {
${KIND} delete cluster --name airship ${KIND} delete cluster --name $CLUSTER
rm -rf ${TMP} rm -rf ${TMP}
} }
trap cleanup EXIT trap cleanup EXIT
generate_airshipconf "default"
phase_plans=$(airshipctl --airshipconf ${AIRSHIPCONFIG} plan list | grep "PhasePlan" | awk -F '/' '{print $2}' | awk '{print $1}')
for plan in $phase_plans; do
cluster_list=$(airshipctl --airshipconf ${AIRSHIPCONFIG} cluster list)
# Loop over all cluster types and phases for the given site # Loop over all cluster types and phases for the given site
for cluster in ephemeral target; do for cluster in $cluster_list; do
if [[ -d "${MANIFEST_ROOT}/${SITE_ROOT}/${SITE}/${cluster}" ]]; then
echo -e "\n**** Rendering phases for cluster: ${cluster}" echo -e "\n**** Rendering phases for cluster: ${cluster}"
# Since we'll be mucking with the kubeconfig - make a copy of it and muck with the copy
cp ${AIRSHIPKUBECONFIG} ${KUBECONFIG}
export CLUSTER="${cluster}"
# Start a fresh, empty kind cluster for validating documents # Start a fresh, empty kind cluster for validating documents
./tools/document/start_kind.sh ./tools/document/start_kind.sh
# Since we'll be mucking with the kubeconfig - make a copy of it and muck with the copy
cp ${KUBECONFIG} ${AIRSHIPKUBECONFIG}
# This is a big hack to work around kubeconfig reconciliation
# change the cluster name (as well as context and user) to avoid kubeconfig reconciliation
sed -i "s/${CONTEXT}/${CONTEXT}_${cluster}/" ${AIRSHIPKUBECONFIG}
generate_airshipconf ${cluster} generate_airshipconf ${cluster}
# A sequential list of potential phases. A fancier attempt at this has been # A sequential list of potential phases. A fancier attempt at this has been
# removed since it was choking in certain cases and got to be more trouble than was worth. # removed since it was choking in certain cases and got to be more trouble than was worth.
# This should be removed once we have a phase map that is smarter. # This should be removed once we have a phase map that is smarter.
# In the meantime, as new phases are added, please add them here as well. # In the meantime, as new phases are added, please add them here as well.
phases="initinfra-ephemeral controlplane-ephemeral initinfra-target workers-target" phases=$(airshipctl --airshipconf ${AIRSHIPCONFIG} phase list --plan $plan -c $cluster | grep Phase | awk -F '/' '{print $2}' || true)
for phase in $phases; do for phase in $phases; do
# Guard against bootstrap or initinfra being missing, which could be the case for some configs # Guard against bootstrap or initinfra being missing, which could be the case for some configs
if [ -d "${MANIFEST_ROOT}/${SITE_ROOT}/${SITE}/${cluster}/${phase}" ]; then
echo -e "\n*** Rendering ${cluster}/${phase}" echo -e "\n*** Rendering ${cluster}/${phase}"
# step 1: actually apply all crds in the phase # step 1: actually apply all crds in the phase
# TODO: will need to loop through phases in order, eventually # TODO: will need to loop through phases in order, eventually
# e.g., load CRDs from initinfra first, so they're present when validating later phases # e.g., load CRDs from initinfra first, so they're present when validating later phases
${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG} phase render ${phase} -k CustomResourceDefinition > ${TMP}/${phase}-crds.yaml ${AIRSHIPCTL} --airshipconf ${AIRSHIPCONFIG} phase render ${phase} -s executor -k CustomResourceDefinition >${TMP}/${phase}-crds.yaml
if [ -s ${TMP}/${phase}-crds.yaml ]; then if [ -s ${TMP}/${phase}-crds.yaml ]; then
${KUBECTL} --context ${CONTEXT} --kubeconfig ${KUBECONFIG} apply -f ${TMP}/${phase}-crds.yaml ${KUBECTL} --context ${CLUSTER} --kubeconfig ${KUBECONFIG} apply -f ${TMP}/${phase}-crds.yaml
fi fi
# step 2: dry-run the entire phase # step 2: dry-run the entire phase
${ACTL} phase run --dry-run ${phase} ${ACTL} phase run --dry-run ${phase}
fi
done done
${KIND} delete cluster --name airship ${KIND} delete cluster --name $CLUSTER
fi done
done done

View File

@ -34,7 +34,6 @@ for site_root in ${SITE_ROOTS}; do
echo -e "\nValidating site: ${MANIFEST_ROOT}/${site_root}/${site}\n****************" echo -e "\nValidating site: ${MANIFEST_ROOT}/${site_root}/${site}\n****************"
MANIFEST_ROOT=${MANIFEST_ROOT} SITE_ROOT=${site_root} SITE=${site} \ MANIFEST_ROOT=${MANIFEST_ROOT} SITE_ROOT=${site_root} SITE=${site} \
./tools/document/validate_site_docs.sh ./tools/document/validate_site_docs.sh
echo "Validation of site ${site} is succesful!" echo "Validation of site ${site} is successful!"
done done
done done

View File

@ -80,8 +80,9 @@
gate_scripts: gate_scripts:
- ./tools/deployment/01_install_kubectl.sh - ./tools/deployment/01_install_kubectl.sh
- ./tools/deployment/21_systemwide_executable.sh - ./tools/deployment/21_systemwide_executable.sh
- ./tools/deployment/22_test_configs.sh
- ./tools/validate_docs - ./tools/validate_docs
voting: false voting: true
- job: - job:
name: airship-airshipctl-functional-existing-k8s name: airship-airshipctl-functional-existing-k8s