Add a pre-upgrade job to resize Loki PVCs

This commit is contained in:
Matt Pryor 2023-03-15 09:45:56 +00:00
parent c4a00aabae
commit a960dec59f
2 changed files with 137 additions and 29 deletions

View File

@ -1,4 +1,26 @@
{{- if and .Values.monitoring.enabled .Values.monitoring.lokiStack.enabled }}
{{-
$lokiEnabled :=
and
.Values.monitoring.enabled
.Values.monitoring.lokiStack.enabled
}}
{{-
$lokiPersistenceEnabled :=
dig
"loki"
"persistence"
"enabled"
false
.Values.monitoring.lokiStack.release.values
}}
{{-
$lokiVolumeSize :=
ternary
.Values.monitoring.lokiStack.release.values.loki.persistence.size
""
$lokiPersistenceEnabled
}}
{{- if $lokiEnabled }}
---
apiVersion: v1
kind: Secret
@ -126,3 +148,101 @@ spec:
loki-metrics-dashboard.json: |
{{- .Files.Get "grafana-dashboards/loki-metrics-dashboard.json" | nindent 12 }}
{{- end }}
{{- if .Release.IsUpgrade }}
---
# Pre-upgrade hook Job that checks whether the Loki PVC volumes need to be
# resized before the new release is applied. Guarded by .Release.IsUpgrade,
# so it never runs on the initial install.
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "cluster-addons.componentName" (list . "loki-stack-manage-pvcs") }}
  labels: {{ include "cluster-addons.componentLabels" (list . "loki-stack-manage-pvcs") | nindent 4 }}
  annotations:
    # Run before the upgrade applies; replace any leftover hook Job from a
    # previous attempt, and delete the Job on success (failed Jobs are kept
    # around for debugging).
    helm.sh/hook: pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
  backoffLimit: {{ .Values.hooks.backoffLimit }}
  activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }}
  template:
    metadata:
      labels: {{ include "cluster-addons.componentSelectorLabels" (list . "loki-stack-manage-pvcs") | nindent 8 }}
    spec:
      {{- with .Values.hooks.imagePullSecrets }}
      imagePullSecrets: {{ toYaml . | nindent 8 }}
      {{- end }}
      securityContext: {{ toYaml .Values.hooks.podSecurityContext | nindent 8 }}
      restartPolicy: OnFailure
      containers:
        - name: manage-pvcs
          image: {{
            printf "%s:%s"
              .Values.hooks.image.repository
              (default .Chart.AppVersion .Values.hooks.image.tag)
          }}
          imagePullPolicy: {{ .Values.hooks.image.pullPolicy }}
          securityContext: {{ toYaml .Values.hooks.securityContext | nindent 12 }}
          # The script below is rendered at template time using the
          # $lokiEnabled / $lokiPersistenceEnabled / $lokiVolumeSize variables
          # defined at the top of this template. It is best-effort: if the
          # kubeconfig file is not present or the target API server is
          # unreachable, it exits 0 without doing anything.
          #
          # Logic:
          #   1. If Loki is enabled and the loki-stack StatefulSet's first
          #      volumeClaimTemplate size differs from the configured size,
          #      delete the StatefulSet so the upgrade re-creates it
          #      (presumably because volumeClaimTemplates cannot be updated
          #      in place).
          #   2. If persistence is also enabled, patch every Loki PVC to the
          #      configured size. NOTE(review): assumes the StorageClass
          #      allows volume expansion — confirm.
          #   3. Otherwise (Loki disabled OR persistence disabled) delete the
          #      Loki PVCs. NOTE(review): this branch also fires when the
          #      whole Loki stack is disabled — confirm that is intentional.
          args:
            - /bin/bash
            - -c
            - |
                set -ex
                test -f "$KUBECONFIG" || exit 0
                kubectl version || exit 0
                {{- if $lokiEnabled }}
                EXISTING_SIZE="$(
                  kubectl get statefulset loki-stack \
                    --namespace {{ .Values.monitoring.lokiStack.release.namespace }} \
                    --ignore-not-found \
                    --output jsonpath='{.spec.volumeClaimTemplates[0].spec.resources.requests.storage}'
                )"
                if [ "$EXISTING_SIZE" != "{{ $lokiVolumeSize }}" ]; then
                  kubectl delete statefulset loki-stack \
                    --namespace {{ .Values.monitoring.lokiStack.release.namespace }} \
                    --ignore-not-found
                fi
                {{- end }}
                {{- if and $lokiEnabled $lokiPersistenceEnabled }}
                pvcs=($( \
                  kubectl get pvc \
                    --namespace {{ .Values.monitoring.lokiStack.release.namespace }} \
                    --selector app=loki,release=loki-stack \
                    --output name \
                ))
                for pvc in "${pvcs[@]}"; do
                  kubectl patch $pvc \
                    --namespace {{ .Values.monitoring.lokiStack.release.namespace }} \
                    --patch '{"spec": {"resources": {"requests": {"storage": "{{ $lokiVolumeSize }}"}}}}'
                done
                {{- else }}
                kubectl delete pvc \
                  --namespace {{ .Values.monitoring.lokiStack.release.namespace }} \
                  --selector app=loki,release=loki-stack \
                  --wait=false
                {{- end }}
          env:
            # Kubeconfig for the target cluster, mounted read-only from the
            # chart's kubeconfig secret (see volumes below).
            - name: KUBECONFIG
              value: /etc/kubernetes/config
          resources: {{ toYaml .Values.hooks.resources | nindent 12 }}
          volumeMounts:
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
              readOnly: true
      hostNetwork: {{ .Values.hooks.hostNetwork }}
      {{- with .Values.hooks.nodeSelector }}
      nodeSelector: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.hooks.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.hooks.tolerations }}
      tolerations: {{ toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: etc-kubernetes
          secret:
            secretName: {{ include "cluster-addons.componentName" (list . "kubeconfig") }}
            items:
              - key: value
                path: config
{{- end }}

View File

@ -18,31 +18,7 @@ RUN apt-get update && \
apt-get install -y curl git jq python3 python3-pip tini && \
rm -rf /var/lib/apt/lists/*
# Install a kubectl client for Kubernetes 1.22 as /usr/bin/kubectl-v1.22,
# choosing the binary matching the build machine's architecture
# (amd64/arm64; any other architecture aborts the build via `false` + set -e),
# then smoke-test it with `version --client`.
# NOTE(review): this listing is a diff view with the +/- markers stripped;
# judging by the commit summary ("29 deletions") and the version bumps below,
# this v1.22 install is most likely being REMOVED by this commit — confirm
# against the actual file before relying on it.
ARG KUBECTL_VN_1_22=v1.22.13
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
x86_64) kubectl_arch=amd64 ;; \
aarch64) kubectl_arch=arm64 ;; \
*) false ;; \
esac; \
curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_22}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.22; \
chmod +x /usr/bin/kubectl-v1.22; \
/usr/bin/kubectl-v1.22 version --client
# Install a kubectl client for Kubernetes 1.23 as /usr/bin/kubectl-v1.23
# (same amd64/arm64 selection pattern as the other kubectl installs).
# NOTE(review): diff view with markers stripped — like the v1.22 block above,
# this is probably among the commit's deletions; confirm against the real file.
ARG KUBECTL_VN_1_23=v1.23.10
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
x86_64) kubectl_arch=amd64 ;; \
aarch64) kubectl_arch=arm64 ;; \
*) false ;; \
esac; \
curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_23}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.23; \
chmod +x /usr/bin/kubectl-v1.23; \
/usr/bin/kubectl-v1.23 version --client
ARG KUBECTL_VN_1_24=v1.24.4
ARG KUBECTL_VN_1_24=v1.24.11
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
@ -54,7 +30,7 @@ RUN set -ex; \
chmod +x /usr/bin/kubectl-v1.24; \
/usr/bin/kubectl-v1.24 version --client
ARG KUBECTL_VN_1_25=v1.25.0
ARG KUBECTL_VN_1_25=v1.25.7
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
@ -66,10 +42,22 @@ RUN set -ex; \
chmod +x /usr/bin/kubectl-v1.25; \
/usr/bin/kubectl-v1.25 version --client
# Install a kubectl client for Kubernetes 1.26 as /usr/bin/kubectl-v1.26.
# Only amd64 and arm64 build hosts are supported: any other architecture
# fails the `case` with `false`, which aborts the build under `set -e`.
ARG KUBECTL_VN_1_26=v1.26.2
RUN set -ex; \
    case "$(uname -m)" in \
        x86_64) arch=amd64 ;; \
        aarch64) arch=arm64 ;; \
        *) false ;; \
    esac; \
    curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_26}/bin/linux/${arch}/kubectl -o /usr/bin/kubectl-v1.26; \
    chmod +x /usr/bin/kubectl-v1.26; \
    /usr/bin/kubectl-v1.26 version --client
# Point Helm's cache/config/data at /tmp — presumably so the image works for
# an arbitrary non-root runtime user with no writable home (TODO confirm).
# Fixed: the legacy space-separated `ENV key value` form is deprecated
# (BuildKit LegacyKeyValueFormat check); use `key=value` and group the
# related variables in one instruction.
ENV HELM_CACHE_HOME=/tmp/helm/cache \
    HELM_CONFIG_HOME=/tmp/helm/config \
    HELM_DATA_HOME=/tmp/helm/data
# NOTE(review): diff-view residue — the two ARG lines below are the old and
# new values of the same build arg (v3.9.4 -> v3.11.2). In a real Dockerfile
# only the last declaration takes effect; the real file should contain just
# one of them.
ARG HELM_VERSION=v3.9.4
ARG HELM_VERSION=v3.11.2
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
@ -81,7 +69,7 @@ RUN set -ex; \
tar -xz --strip-components 1 -C /usr/bin linux-${helm_arch}/helm; \
helm version
# Default kubectl minor version for the helper scripts copied in below.
# Fixed: use the `key=value` ENV form — the space-separated form is
# deprecated (BuildKit LegacyKeyValueFormat check).
# NOTE(review): diff-view residue — these two ENV lines are the old and new
# values of the same variable (v1.25 -> v1.26); the later assignment wins,
# and the real file should contain only one.
ENV KUBECTL_VN_LATEST=v1.25
ENV KUBECTL_VN_LATEST=v1.26
COPY ./bin/* /usr/bin/
# Drop root for the final image; $UTILS_UID is declared earlier in the file
# (outside this view).
USER $UTILS_UID