Migrate to self hosted using charts
This change includes several interconnected features:

* Migration to Deckhand-based configuration. This is integrated here because new configuration data were needed, so it would have been wasted effort either to implement them in the old format or to update the old configuration data to the Deckhand format.
* Failing faster with stronger validation. The migration to Deckhand configuration was a good opportunity to add schema validation, which is a near-term requirement anyway. Additionally, rendering all templates up front adds another layer of "fail fast".
* Separation of certificate generation and configuration assembly into different commands. Combined with Deckhand substitution, this creates a much clearer distinction between Promenade configuration and deployable secrets.
* Migration of components to charts. This is a key step that will enable support for dynamic node management. It also paves the way for significant configurability in component deployment.
* The kubelet version is configurable and controlled via the download URL.
* Restructuring of templates to be more intuitive. Many of the templates require changes or deletion due to the migration to charts.
* Installation of pre-configured, useful tools on hosts, including calicoctl.
* DNS is now provided by CoreDNS, which is highly configurable.

Change-Id: I9f2d8da6346f4308be5083a54764ce6035a2e10c
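For orientation, the certificate/configuration split described above corresponds to separate CLI entry points. A minimal sketch of the intended workflow, assuming the promenade generate-certs and build-all commands; the directory and file names are illustrative, not taken from this commit:

    # Hypothetical invocation; paths are illustrative.
    # 1. Generate deployable secrets (certificates) from the Deckhand-format config:
    promenade generate-certs -o certs/ config/*.yaml
    # 2. Render all templates up front (fail fast), substituting the generated secrets:
    promenade build-all -o build/ config/*.yaml certs/*.yaml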
parent: b3de5e990a
commit: 95643147c5
.dockerignore (1 changed hunk)
@@ -1,7 +1,6 @@
__pycache__
Vagrantfile
.vagrant
promenade.tar
promenade.egg-info
Vagrantfile
__pycache__
docs
example
setup.sh
promenade.egg-info
.gitignore (vendored, 1 changed line)
@@ -13,6 +13,7 @@ __pycache__
/promenade.egg-info
/tmp
.tox/
/.eggs
ChangeLog

# Sphinx documentation
Dockerfile (18 changed lines)
@@ -14,11 +14,6 @@
FROM python:3.6

ENV CNI_VERSION=v0.5.2 \
    HELM_VERSION=v2.5.0 \
    KUBECTL_VERSION=v1.6.8 \
    KUBELET_VERSION=v1.6.8

VOLUME /etc/promenade
VOLUME /target

@@ -26,17 +21,6 @@ RUN mkdir /promenade
WORKDIR /promenade

RUN set -ex \
    && export BIN_DIR=/assets/usr/local/bin \
    && mkdir -p $BIN_DIR \
    && curl -Lo $BIN_DIR/kubelet https://storage.googleapis.com/kubernetes-release/release/$KUBELET_VERSION/bin/linux/amd64/kubelet \
    && curl -Lo $BIN_DIR/kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl \
    && chmod 555 $BIN_DIR/kubelet \
    && chmod 555 $BIN_DIR/kubectl \
    && mkdir -p /assets/opt/cni/bin \
    && curl -L https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz | tar -zxv -C /assets/opt/cni/bin/ \
    && curl -L https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv -C /tmp linux-amd64/helm \
    && mv /tmp/linux-amd64/helm $BIN_DIR/helm \
    && chmod 555 $BIN_DIR/helm \
    && curl -Lo /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
    && chmod 555 /usr/local/bin/cfssl \
    && apt-get update -q \
@@ -48,7 +32,5 @@ RUN set -ex \
COPY requirements-frozen.txt /promenade
RUN pip install --no-cache-dir -r requirements-frozen.txt

COPY ./assets/ /assets/

COPY . /promenade
RUN pip install -e /promenade
Vagrantfile (vendored, 7 changed lines)
@@ -5,7 +5,12 @@ Vagrant.configure("2") do |config|
  config.vm.box = "promenade/ubuntu1604"
  config.vm.box_check_update = false

  config.vm.provision :shell, privileged: true, inline:<<EOS
  provision_env = {}
  if ENV['http_proxy'] then
    provision_env['http_proxy'] = ENV['http_proxy']
  end

  config.vm.provision :shell, privileged: true, env: provision_env, inline:<<EOS
    set -ex

    echo === Setting up NTP so simulate MaaS environment ===
assets/etc/kubernetes/armada-loader/assets/charts/calico: etcd Service template (deleted)
@@ -1,21 +0,0 @@
---
# This manfiest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
  labels:
    tier: control-plane
    component: calico-etcd
  name: calico-etcd
  namespace: kube-system
spec:
  # Select the calico-etcd pod running on the master.
  selector:
    tier: control-plane
    component: calico-etcd
  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  clusterIP: {{ .Values.etcd.service.ip }}
  ports:
    - port: {{ .Values.etcd.service.port }}
assets/etc/kubernetes/armada-loader/assets/charts/calico: chart values (deleted)
@@ -1,22 +0,0 @@
calico:
  # Interface used with IP_AUTODETECTION_METHOD=interface=...
  interface: null
  pod_ip_cidr: 10.97.0.0/16
  etcd:
    credentials:
      ca: |-
        invalid ca
      cert: |-
        invalid cert
      key: |-
        invalid key

images:
  node: quay.io/calico/node:v1.3.0
  cni: quay.io/calico/cni:v1.9.1
  policy_controller: quay.io/calico/kube-policy-controller:v0.6.0

etcd:
  service:
    ip: 10.96.232.136
    port: 6666
assets/etc/kubernetes/armada-loader/assets/charts/kube-dns: Chart.yaml (deleted)
@@ -1,4 +0,0 @@
apiVersion: v1
description: A Helm chart for Kubernetes
name: kube-dns
version: 0.1.0
assets/etc/kubernetes/armada-loader/assets/charts/kube-dns: ConfigMap template (deleted)
@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  upstreamNameservers: |-
    {{ .Values.kube_dns.upstream_nameservers | toJson }}
assets/etc/kubernetes/armada-loader/assets/charts/kube-dns: Deployment template (deleted)
@@ -1,183 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    k8s-app: kube-dns
  name: kube-dns
  namespace: kube-system
spec:
  replicas: {{ .Values.kube_dns.replicas }}
  selector:
    matchLabels:
      k8s-app: kube-dns
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: kube-dns
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: kubernetes.io/hostname
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values:
                      - kube-dns
      containers:
        - name: kubedns
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthcheck/kubedns
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          args:
            - --domain=cluster.local.
            - --dns-port=10053
            - --config-dir=/kube-dns-config
            - --v=5
          env:
            - name: PROMETHEUS_PORT
              value: "10055"
          image: {{ .Values.images.kube_dns }}
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 10053
              name: dns-local
              protocol: UDP
            - containerPort: 10053
              name: dns-tcp-local
              protocol: TCP
            - containerPort: 10055
              name: metrics
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readiness
              port: 8081
              scheme: HTTP
            initialDelaySeconds: 3
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /kube-dns-config
              name: kube-dns-config
        - args:
            - -v=2
            - -logtostderr
            - -configDir=/etc/k8s/dns/dnsmasq-nanny
            - -restartDnsmasq=true
            - --
            - -k
            - --cache-size=1000
            - --log-facility=-
            - --server=/cluster.local/127.0.0.1#10053
            - --server=/in-addr.arpa/127.0.0.1#10053
            - --server=/ip6.arpa/127.0.0.1#10053
          image: {{ .Values.images.dnsmasq }}
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthcheck/dnsmasq
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          name: dnsmasq
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
          resources:
            requests:
              cpu: 150m
              memory: 20Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /etc/k8s/dns/dnsmasq-nanny
              name: kube-dns-config
        - args:
            - --v=2
            - --logtostderr
            - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
            - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
          image: {{ .Values.images.sidecar }}
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /metrics
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          name: sidecar
          ports:
            - containerPort: 10054
              name: metrics
              protocol: TCP
          resources:
            requests:
              cpu: 10m
              memory: 20Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: Default
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: kube-dns
      serviceAccountName: kube-dns
      terminationGracePeriodSeconds: 30
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
        - key: CriticalAddonsOnly
          operator: Exists
      volumes:
        - configMap:
            defaultMode: 420
            name: kube-dns
            optional: true
          name: kube-dns-config
assets/etc/kubernetes/armada-loader/assets/charts/kube-dns: ServiceAccount template (deleted)
@@ -1,6 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
assets/etc/kubernetes/armada-loader/assets/charts/kube-dns: Service template (deleted)
@@ -1,21 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ .Values.service.cluster_ip }}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
assets/etc/kubernetes/armada-loader/assets/charts/kube-dns: chart values (deleted)
@@ -1,13 +0,0 @@
images:
  dnsmasq: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
  kube_dns: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
  sidecar: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2

kube_dns:
  replicas: 2
  upstream_nameservers:
    - 8.8.8.8
    - 8.8.4.4

service:
  cluster_ip: 10.96.0.10
charts/apiserver/Chart.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
description: A chart for Kubernetes API server
name: apiserver
version: 0.1.0
charts/apiserver/requirements.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
dependencies:
  - name: helm-toolkit
    repository: http://localhost:8879/charts
    version: 0.1.0
charts/apiserver/templates/bin/_anchor.tpl (new file, 96 lines)
@@ -0,0 +1,96 @@
#!/bin/sh

set -x

export MANIFEST_PATH=/host{{ .Values.anchor.kubelet.manifest_path }}/{{ .Values.service.name }}.yaml
export PKI_PATH=/host{{ .Values.apiserver.host_etc_path }}/pki

copy_certificates() {
    mkdir -p $PKI_PATH
    cp /certs/* /keys/* $PKI_PATH
}

create_manifest() {
    mkdir -p $(dirname $MANIFEST_PATH)
    cat <<EODOC > $MANIFEST_PATH
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ .Values.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{ .Values.service.name }}-service: enabled
spec:
  hostNetwork: true
  containers:
    - name: apiserver
      image: {{ .Values.images.apiserver }}
      env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
      command:
        - {{ .Values.apiserver.command }}
        - --authorization-mode=Node,RBAC
        - --advertise-address=\$(POD_IP)
        - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
        - --anonymous-auth=false
        - --bind-address=0.0.0.0
        - --secure-port={{ .Values.apiserver.port }}
        - --insecure-port=0
        - --apiserver-count={{ .Values.apiserver.replicas }}

        - --client-ca-file=/etc/kubernetes/apiserver/pki/cluster-ca.pem
        - --tls-cert-file=/etc/kubernetes/apiserver/pki/apiserver.pem
        - --tls-private-key-file=/etc/kubernetes/apiserver/pki/apiserver-key.pem

        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-certificate-authority=/etc/kubernetes/apiserver/pki/cluster-ca.pem
        - --kubelet-client-certificate=/etc/kubernetes/apiserver/pki/apiserver.pem
        - --kubelet-client-key=/etc/kubernetes/apiserver/pki/apiserver-key.pem

        - --etcd-servers={{ .Values.apiserver.etcd.endpoints }}
        - --etcd-cafile=/etc/kubernetes/apiserver/pki/etcd-client-ca.pem
        - --etcd-certfile=/etc/kubernetes/apiserver/pki/etcd-client.pem
        - --etcd-keyfile=/etc/kubernetes/apiserver/pki/etcd-client-key.pem

        - --allow-privileged=true

        - --service-cluster-ip-range={{ .Values.network.service_cidr }}
        - --service-account-key-file=/etc/kubernetes/apiserver/pki/service-account.pub

        - --v=5

      ports:
        - containerPort: 443
      volumeMounts:
        - name: etc
          mountPath: /etc/kubernetes/apiserver
  volumes:
    - name: etc
      hostPath:
        path: {{ .Values.apiserver.host_etc_path }}
EODOC
}

cleanup() {
    rm -f $MANIFEST_PATH
    rm -rf $PKI_PATH
}

while true; do
    if [ -e /tmp/stop ]; then
        echo Stopping
        cleanup
        break
    fi

    if [ ! -e $MANIFEST_PATH ]; then
        copy_certificates
        create_manifest
    fi

    sleep {{ .Values.anchor.period }}
done
charts/apiserver/templates/bin/_pre_stop.tpl (new file, 4 lines)
@@ -0,0 +1,4 @@
set -x

touch /tmp/stop
sleep 10000
charts/apiserver/templates/configmap-bin.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-bin
data:
  anchor: |+
{{ tuple "bin/_anchor.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  pre_stop: |+
{{ tuple "bin/_pre_stop.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
charts/apiserver/templates/configmap-certs.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-certs
data:
  cluster-ca.pem: {{ .Values.tls.ca | quote }}
  apiserver.pem: {{ .Values.tls.cert | quote }}

  etcd-client-ca.pem: {{ .Values.apiserver.etcd.tls.ca | quote }}
  etcd-client.pem: {{ .Values.apiserver.etcd.tls.cert | quote }}

  service-account.pub: {{ .Values.service_account.public_key | quote }}
charts/apiserver/templates/daemonset.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: {{ .Values.service.name }}-anchor
  labels:
    application: kubernetes
    component: kubernetes-apiserver-anchor
spec:
  selector:
    matchLabels:
      {{ .Values.service.name | quote }}: anchor
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
      labels:
        {{ .Values.service.name | quote }}: anchor
    spec:
      hostNetwork: true
      dnsPolicy: {{ .Values.anchor.dns_policy }}
      nodeSelector:
        {{ .Values.anchor.node_selector.key }}: {{ .Values.anchor.node_selector.value }}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: anchor
          image: {{ .Values.images.anchor }}
          command:
            - /tmp/bin/anchor
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/bin/pre_stop
          volumeMounts:
            - name: certs
              mountPath: /certs
            - name: host
              mountPath: /host
            - name: keys
              mountPath: /keys
            - name: bin
              mountPath: /tmp/bin
      terminationGracePeriodSeconds: {{ .Values.anchor.termination_grace_period }}
      volumes:
        - name: bin
          configMap:
            name: {{ .Values.service.name }}-bin
            defaultMode: 0555
        - name: certs
          configMap:
            name: {{ .Values.service.name }}-certs
            defaultMode: 0444
        - name: host
          hostPath:
            path: /
        - name: keys
          secret:
            secretName: {{ .Values.service.name }}-keys
            defaultMode: 0444
charts/apiserver/templates/secret-apiserver.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.service.name }}-keys
type: Opaque
data:
  apiserver-key.pem: {{ .Values.tls.key | b64enc }}
  etcd-client-key.pem: {{ .Values.apiserver.etcd.tls.key | b64enc }}
charts/apiserver/templates/service.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.service.name }}
spec:
  selector:
    {{ .Values.service.name }}-service: enabled
{{- if .Values.service.ip }}
  clusterIP: {{ .Values.service.ip }}
{{- end }}
  ports:
    - name: https
      port: {{ .Values.apiserver.port }}
charts/apiserver/values.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
anchor:
  dns_policy: Default
  kubelet:
    manifest_path: /etc/kubernetes/manifests
  node_selector:
    key: kubernetes-apiserver
    value: enabled
  termination_grace_period: 3600
  period: 15

apiserver:
  command: /apiserver

  etcd:
    endpoints: https://kubernetes-etcd.kube-system.svc.cluster.local
    tls:
      ca: placeholder
      cert: placeholder
      key: placeholder

  host_etc_path: /etc/kubernetes/apiserver

  port: 6443
  replicas: 3

images:
  anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
  apiserver: gcr.io/google_containers/hyperkube-amd64:v1.8.0

network:
  kubernetes_service_ip: 10.96.0.1
  pod_cidr: 10.97.0.0/16
  service_cidr: 10.96.0.0/16

service:
  name: kubernetes-apiserver
  ip: null

service_account:
  public_key: placeholder

tls:
  ca: placeholder
  cert: placeholder
  key: placeholder
(modified: calico node DaemonSet template)
@@ -57,6 +57,9 @@ spec:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
@@ -95,9 +98,9 @@ spec:
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
            {{- if .Values.calico.interface }}
            {{- if .Values.calico.ip_autodetection_method }}
            - name: IP_AUTODETECTION_METHOD
              value: interface={{ .Values.calico.interface }}
              value: {{ .Values.calico.ip_autodetection_method }}
            {{- end }}
          securityContext:
            privileged: true
@@ -144,6 +147,41 @@ spec:
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
        {{- if .Values.calico.ctl.install_on_host }}
        # This container installs calicoctl on each node.
        - name: install-calicoctl
          image: {{ .Values.images.cni }}
          command:
            - /bin/sh
            - -c
            - |-
              set -ex

              cat <<'SCRIPT' > /target/calicoctl
              #!/usr/bin/env bash
              set -e

              exec docker run --rm -it \
                  --net host \
                  -e ETCD_CA_CERT_FILE=/etc/cni/net.d/calico-tls/etcd-ca \
                  -e ETCD_CERT_FILE=/etc/cni/net.d/calico-tls/etcd-cert \
                  -e ETCD_KEY_FILE=/etc/cni/net.d/calico-tls/etcd-key \
                  -e ETCD_ENDPOINTS=https://{{ .Values.etcd.service.ip }}:{{ .Values.etcd.service.port }},https://127.0.0.1:{{ .Values.etcd.service.port }} \
                  -v /etc/cni/net.d/calico-tls:/etc/cni/net.d/calico-tls \
                  {{ .Values.images.ctl }} \
                  $*
              SCRIPT

              chmod 755 /target/calicoctl

              while true ; do
                sleep 10000
              done

          volumeMounts:
            - name: host-bin
              mountPath: /target
        {{- end }}
      volumes:
        # Used by calico/node.
        - name: lib-modules
@@ -163,3 +201,8 @@ spec:
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
        {{- if .Values.calico.ctl.install_on_host }}
        - name: host-bin
          hostPath:
            path: /usr/local/bin
        {{- end }}
(modified: calico etcd secrets template)
@@ -12,6 +12,6 @@ data:
  # not using TLS for etcd.
  # This self-hosted install expects three files with the following names. The values
  # should be base64 encoded strings of the entire contents of each file.
  etcd-key: {{ .Values.calico.etcd.credentials.key | b64enc }}
  etcd-cert: {{ .Values.calico.etcd.credentials.cert | b64enc }}
  etcd-ca: {{ .Values.calico.etcd.credentials.ca | b64enc }}
  etcd-key: {{ .Values.etcd.tls.key | b64enc }}
  etcd-cert: {{ .Values.etcd.tls.cert | b64enc }}
  etcd-ca: {{ .Values.etcd.tls.ca | b64enc }}
charts/calico/values.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
calico:
  ip_autodetection_method: null
  pod_ip_cidr: 10.97.0.0/16
  ctl:
    install_on_host: false

etcd:
  service:
    ip: 10.96.232.136
    port: 6666
  tls:
    ca: |-
      invalid ca
    cert: |-
      invalid cert
    key: |-
      invalid key

images:
  cni: quay.io/calico/cni:v1.10.0
  ctl: quay.io/calico/ctl:v1.5.0
  node: quay.io/calico/node:v2.5.1
  policy_controller: quay.io/calico/kube-policy-controller:v0.7.0
charts/controller_manager/Chart.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
description: A chart for Kubernetes controller-manager
name: controller_manager
version: 0.1.0
charts/controller_manager/requirements.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
dependencies:
  - name: helm-toolkit
    repository: http://localhost:8879/charts
    version: 0.1.0
charts/controller_manager/templates/bin/_anchor.tpl (new file, 79 lines)
@@ -0,0 +1,79 @@
#!/bin/sh

set -x

export MANIFEST_PATH=/host{{ .Values.anchor.kubelet.manifest_path }}/{{ .Values.service.name }}.yaml
export ETC_PATH=/host{{ .Values.controller_manager.host_etc_path }}

copy_etc_files() {
    mkdir -p $ETC_PATH
    cp /configmap/* /secret/* $ETC_PATH
}

create_manifest() {
    mkdir -p $(dirname $MANIFEST_PATH)
    cat <<EODOC > $MANIFEST_PATH
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ .Values.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{ .Values.service.name }}-service: enabled
spec:
  hostNetwork: true
  containers:
    - name: controller-manager
      image: {{ .Values.images.controller_manager }}
      env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
      command:
        - {{ .Values.controller_manager.command }}
        - --allocate-node-cidrs=true
        - --cluster-cidr={{ .Values.network.pod_cidr }}
        - --configure-cloud-routes=false
        - --leader-elect=true
        - --node-monitor-period={{ .Values.controller_manager.node_monitor_period }}
        - --node-monitor-grace-period={{ .Values.controller_manager.node_monitor_grace_period }}
        - --pod-eviction-timeout={{ .Values.controller_manager.pod_eviction_timeout }}
        - --kubeconfig=/etc/kubernetes/controller-manager/kubeconfig.yaml
        - --root-ca-file=/etc/kubernetes/controller-manager/cluster-ca.pem
        - --service-account-private-key-file=/etc/kubernetes/controller-manager/service-account.priv
        - --service-cluster-ip-range={{ .Values.network.service_cidr }}
        - --use-service-account-credentials=true

        - --v=5

      volumeMounts:
        - name: etc
          mountPath: /etc/kubernetes/controller-manager
  volumes:
    - name: etc
      hostPath:
        path: {{ .Values.controller_manager.host_etc_path }}
EODOC
}

cleanup() {
    rm -f $MANIFEST_PATH
    rm -rf $ETC_PATH
}

while true; do
    if [ -e /tmp/stop ]; then
        echo Stopping
        cleanup
        break
    fi

    if [ ! -e $MANIFEST_PATH ]; then
        copy_etc_files
        create_manifest
    fi

    sleep {{ .Values.anchor.period }}
done
charts/controller_manager/templates/bin/_pre_stop.tpl (new file, 4 lines)
@@ -0,0 +1,4 @@
set -x

touch /tmp/stop
sleep 10000
charts/controller_manager/templates/configmap-bin.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-bin
data:
  anchor: |+
{{ tuple "bin/_anchor.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  pre_stop: |+
{{ tuple "bin/_pre_stop.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
charts/controller_manager/templates/configmap-etc.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-etc
data:
  cluster-ca.pem: {{ .Values.tls.ca | quote }}
  controller-manager.pem: {{ .Values.tls.cert | quote }}
  kubeconfig.yaml: |+
    ---
    apiVersion: v1
    clusters:
      - cluster:
          server: https://{{ .Values.network.kubernetes_netloc }}
          certificate-authority: cluster-ca.pem
        name: kubernetes
    contexts:
      - context:
          cluster: kubernetes
          user: controller-manager
        name: controller-manager@kubernetes
    current-context: controller-manager@kubernetes
    kind: Config
    preferences: {}
    users:
      - name: controller-manager
        user:
          client-certificate: controller-manager.pem
          client-key: controller-manager-key.pem
charts/controller_manager/templates/daemonset.yaml (new file, 69 lines)
@@ -0,0 +1,69 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: {{ .Values.service.name }}-anchor
  labels:
    application: kubernetes
    component: kubernetes-controller-manager-anchor
spec:
  selector:
    matchLabels:
      {{ .Values.service.name | quote }}: anchor
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
      labels:
        {{ .Values.service.name | quote }}: anchor
    spec:
      hostNetwork: true
      dnsPolicy: {{ .Values.anchor.dns_policy }}
      nodeSelector:
        {{ .Values.anchor.node_selector.key }}: {{ .Values.anchor.node_selector.value }}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: anchor
          image: {{ .Values.images.anchor }}
          command:
            - /tmp/bin/anchor
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/bin/pre_stop
          volumeMounts:
            - name: bin
              mountPath: /tmp/bin
            - name: etc
              mountPath: /configmap
            - name: host
              mountPath: /host
            - name: secret
              mountPath: /secret
      terminationGracePeriodSeconds: {{ .Values.anchor.termination_grace_period }}
      volumes:
        - name: bin
          configMap:
            name: {{ .Values.service.name }}-bin
            defaultMode: 0555
        - name: etc
          configMap:
            name: {{ .Values.service.name }}-etc
            defaultMode: 0444
        - name: host
          hostPath:
            path: /
        - name: secret
          secret:
            secretName: {{ .Values.service.name }}
            defaultMode: 0444
charts/controller_manager/templates/secret.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.service.name }}
type: Opaque
data:
  controller-manager-key.pem: {{ .Values.tls.key | b64enc }}
  service-account.priv: {{ .Values.service_account.private_key | b64enc }}
charts/controller_manager/values.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
anchor:
  dns_policy: Default
  kubelet:
    manifest_path: /etc/kubernetes/manifests
  node_selector:
    key: kubernetes-controller-manager
    value: enabled
  period: 15
  termination_grace_period: 3600

controller_manager:
  command: /controller-manager
  host_etc_path: /etc/kubernetes/controller-manager
  node_monitor_period: 5s
  node_monitor_grace_period: 20s
  pod_eviction_timeout: 60s

service_account:
  private_key: placeholder

tls:
  ca: placeholder
  cert: placeholder
  key: placeholder

images:
  anchor: quay.io/attcomdev/kube-controller-manager:v1.8.0
  controller_manager: quay.io/attcomdev/kube-controller-manager:v1.8.0

network:
  kubernetes_netloc: 10.96.0.1
  pod_cidr: 10.97.0.0/16
  service_cidr: 10.96.0.0/16

service:
  name: kubernetes-controller-manager
charts/coredns/Chart.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
description: A chart for coredns
name: coredns
version: 0.1.0
charts/coredns/templates/bin/_anchor.tpl (new file, 129 lines)
@@ -0,0 +1,129 @@
#!/bin/sh

{{- $envAll := . }}

set -x

export MANIFEST_PATH=/host{{ .Values.anchor.kubelet.manifest_path }}/{{ .Values.service.name }}.yaml
export ETC_PATH=/host{{ .Values.coredns.host_etc_path }}
TOKEN_PATH=/var/run/secrets/kubernetes.io/serviceaccount/token
CA_CERT_PATH=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt

copy_etc_files() {
    mkdir -p $ETC_PATH/zones
    cp /configmap/* /secret/* $ETC_PATH
    create_corefile
}

create_corefile() {
    cat <<EOCOREFILE > $ETC_PATH/Corefile
promenade {
    file /etc/coredns/zones/promenade
    loadbalance
    errors stdout
    log stdout
}

{{ .Values.coredns.cluster_domain }} {
    kubernetes {
        endpoint https://{{ .Values.network.kubernetes_netloc }}
        tls /etc/coredns/coredns.pem /etc/coredns/coredns-key.pem /etc/coredns/cluster-ca.pem
    }
    loadbalance
    cache {{ .Values.coredns.cache.ttl }}
    errors stdout
    log stdout
}

. {
{{- if .Values.coredns.upstream_nameservers }}
    proxy . {{- range .Values.coredns.upstream_nameservers }} {{ . -}}{{- end }}
{{- end }}
    errors stdout
    log stdout
}
EOCOREFILE
}

create_manifest() {
    mkdir -p $(dirname $MANIFEST_PATH)
    # XXX liveness/readiness probes
    cat <<EODOC > $MANIFEST_PATH
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ .Values.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{ .Values.service.name }}-service: enabled
    anchor-managed: enabled
spec:
  hostNetwork: true
  containers:
    - name: coredns
      image: {{ .Values.images.coredns }}
      command:
        - /coredns
        - -conf
        - /etc/coredns/Corefile
      volumeMounts:
        - name: etc
          mountPath: /etc/coredns
  volumes:
    - name: etc
      hostPath:
        path: {{ .Values.coredns.host_etc_path }}
EODOC
}

update_managed_zones() {
{{- range .Values.coredns.zones }}

    FILENAME="$ETC_PATH/zones/{{ .name }}"
    NEXT_FILENAME="${FILENAME}-next"
    SUCCESS=1
    NOW=$(date +%s)

    # Add Header
    cat <<EOBIND > $NEXT_FILENAME
\$ORIGIN {{ .name }}.
{{ .name }}. IN SOA @ root $NOW 3h 15m 1w 1d

EOBIND
{{ range .services }}
    # Don't accidentally log service account token
    set +x
    SERVICE_IPS=$(kubectl \
        --server https://{{ $envAll.Values.network.kubernetes_netloc }} \
        --certificate-authority $CA_CERT_PATH \
        --token $(cat $TOKEN_PATH) \
        -n {{ .service.namespace }} \
        get ep {{ .service.name }} \
        -o 'jsonpath={.subsets[*].addresses[*].ip}')
    set -x
    if [ "x$SERVICE_IPS" != "x" ]; then
        for IP in $SERVICE_IPS; do
            echo {{ .bind_name }} IN A $IP >> $NEXT_FILENAME
        done
    else
        echo Failed to upate zone file for {{ .name }}
        SUCCESS=0
    fi
{{- end }}

    if [ $SUCCESS = 1 ]; then
        echo Replacing zone file $FILENAME
        mv $NEXT_FILENAME $FILENAME
    fi
{{- end }}
}

copy_etc_files
create_manifest

while true; do
    update_managed_zones

    sleep {{ .Values.anchor.period }}
done
charts/coredns/templates/configmap-bin.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-bin
data:
  anchor: |+
{{ tuple "bin/_anchor.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
charts/coredns/templates/configmap-etc.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-etc
data:
  cluster-ca.pem: {{ .Values.tls.ca | quote }}
  coredns.pem: {{ .Values.tls.cert | quote }}
charts/coredns/templates/daemonset.yaml (new file, 83 lines)
@@ -0,0 +1,83 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: {{ .Values.service.name }}-anchor
  labels:
    application: coredns
    component: coredns-anchor
spec:
  selector:
    matchLabels:
      {{ .Values.service.name | quote }}: anchor
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        {{ .Values.service.name | quote }}: anchor
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
    spec:
      hostNetwork: true
{{- if .Values.node_selector.key }}
      nodeSelector:
        {{ .Values.node_selector.key }}: {{ .Values.node_selector.value }}
{{- end }}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: {{ .Values.service.name }}
      containers:
        - name: anchor
          image: {{ .Values.images.anchor }}
          command:
            - /tmp/bin/anchor
          # livenessProbe:
          #   httpGet:
          #     path: /healthz
          #     port: 10249
          #   initialDelaySeconds: 15
          #   periodSeconds: 15
          #   failureThreshold: 3
          # readinessProbe:
          #   exec:
          #     command:
          #       - sh
          #       - -c
          #       - |-
          #         set -ex
          #
          #         iptables-save | grep 'default/kubernetes:https'
          #   initialDelaySeconds: 15
          #   periodSeconds: 15
          volumeMounts:
            - name: bin
              mountPath: /tmp/bin
            - name: etc
              mountPath: /configmap
            - name: host
              mountPath: /host
            - name: secret
              mountPath: /secret
      volumes:
        - name: bin
          configMap:
            name: {{ .Values.service.name }}-bin
            defaultMode: 0555
        - name: etc
          configMap:
            name: {{ .Values.service.name }}-etc
            defaultMode: 0444
        - name: host
          hostPath:
            path: /
        - name: secret
          secret:
            secretName: {{ .Values.service.name }}
            defaultMode: 0444
charts/coredns/templates/rbac.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: coredns
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
subjects:
  - kind: User
    name: coredns
    apiGroup: rbac.authorization.k8s.io
  - kind: ServiceAccount
    name: coredns
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:coredns
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - pods
      - namespaces
    verbs:
      - get
      - list
      - watch
charts/coredns/templates/secret.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.service.name }}
type: Opaque
data:
  coredns-key.pem: {{ .Values.tls.key | b64enc }}
charts/coredns/templates/service.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.service.name }}
spec:
  selector:
    {{ .Values.service.name }}-service: enabled
  clusterIP: {{ .Values.service.ip }}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
charts/coredns/values.yaml (new file, 40 lines)
@@ -0,0 +1,40 @@
anchor:
  kubelet:
    manifest_path: /etc/kubernetes/manifests
  node_selector: {}
  period: 30

tls:
  ca: placeholder
  cert: placeholder
  key: placeholder

coredns:
  cluster_domain: cluster.local
  cache:
    ttl: 60
  host_etc_path: /etc/coredns
  upstream_nameservers:
    - 8.8.8.8
    - 8.8.4.4

  zones:
    - name: promenade
      services:
        - bind_name: apiserver.kubernetes
          service:
            name: kubernetes
            namespace: default

images:
  anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
  coredns: coredns/coredns:011

network:
  kubernetes_netloc: 10.96.0.1

node_selector: {}

service:
  name: coredns
  ip: 10.96.0.10
charts/etcd/Chart.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
description: A chart for a DaemonSet-based etcd deployment.
name: etcd
version: 0.1.0
charts/etcd/requirements.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
dependencies:
  - name: helm-toolkit
    repository: http://localhost:8879/charts
    version: 0.1.0
charts/etcd/templates/bin/_etcdctl_anchor.tpl (new file, 198 lines)
@@ -0,0 +1,198 @@
#!/bin/sh

set -x

export CLIENT_ENDPOINT=https://$POD_IP:{{ .Values.service.client.target_port }}
export PEER_ENDPOINT=https://$POD_IP:{{ .Values.service.peer.target_port }}
export MANIFEST_PATH=/manifests/{{ .Values.service.name }}.yaml

function copy_certificates {
    ETCD_NAME=$1

    set -e

    mkdir -p /etcd-etc/tls
    # Copy CA Certificates in place
    cp \
        /etc/etcd/tls/certs/client-ca.pem \
        /etc/etcd/tls/certs/peer-ca.pem \
        /etcd-etc/tls

    cp /etc/etcd/tls/certs/$ETCD_NAME-etcd-client.pem /etcd-etc/tls/etcd-client.pem
    cp /etc/etcd/tls/certs/$ETCD_NAME-etcd-peer.pem /etcd-etc/tls/etcd-peer.pem

    cp /etc/etcd/tls/keys/$ETCD_NAME-etcd-client-key.pem /etcd-etc/tls/etcd-client-key.pem
    cp /etc/etcd/tls/keys/$ETCD_NAME-etcd-peer-key.pem /etcd-etc/tls/etcd-peer-key.pem

    set +e
}

function create_manifest {
    ETCD_INITIAL_CLUSTER=$1
    ETCD_INITIAL_CLUSTER_STATE=$2
    cat <<EODOC > $MANIFEST_PATH
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ .Values.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{ .Values.service.name }}-service: enabled
spec:
  hostNetwork: true
  containers:
    - name: etcd
      image: {{ .Values.images.etcd }}
      env:
        - name: ETCD_NAME
          value: $ETCD_NAME
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: ETCD_CLIENT_CERT_AUTH
          value: "true"
        - name: ETCD_PEER_CLIENT_CERT_AUTH
          value: "true"
        - name: ETCD_DATA_DIR
          value: /var/lib/etcd
        - name: ETCD_TRUSTED_CA_FILE
          value: /etc/etcd/tls/client-ca.pem
        - name: ETCD_CERT_FILE
          value: /etc/etcd/tls/etcd-client.pem
        - name: ETCD_STRICT_RECONFIG_CHECK
          value: "true"
        - name: ETCD_KEY_FILE
          value: /etc/etcd/tls/etcd-client-key.pem
        - name: ETCD_PEER_TRUSTED_CA_FILE
          value: /etc/etcd/tls/peer-ca.pem
        - name: ETCD_PEER_CERT_FILE
          value: /etc/etcd/tls/etcd-peer.pem
        - name: ETCD_PEER_KEY_FILE
          value: /etc/etcd/tls/etcd-peer-key.pem
        - name: ETCD_ADVERTISE_CLIENT_URLS
          value: https://\$(POD_IP):{{ .Values.service.client.target_port }}
        - name: ETCD_INITIAL_ADVERTISE_PEER_URLS
          value: https://\$(POD_IP):{{ .Values.service.peer.target_port }}
        - name: ETCD_INITIAL_CLUSTER_TOKEN
          value: {{ .Values.service.name }}-init-token
        - name: ETCD_LISTEN_CLIENT_URLS
          value: https://0.0.0.0:{{ .Values.service.client.target_port }}
        - name: ETCD_LISTEN_PEER_URLS
          value: https://0.0.0.0:{{ .Values.service.peer.target_port }}
        - name: ETCD_INITIAL_CLUSTER_STATE
          value: $ETCD_INITIAL_CLUSTER_STATE
        - name: ETCD_INITIAL_CLUSTER
          value: $ETCD_INITIAL_CLUSTER
        - name: ETCDCTL_API
          value: '3'
        - name: ETCDCTL_DIAL_TIMEOUT
          value: 3s
        - name: ETCDCTL_ENDPOINTS
          value: https://127.0.0.1:{{ .Values.service.client.target_port }}
        - name: ETCDCTL_CACERT
          value: \$(ETCD_TRUSTED_CA_FILE)
        - name: ETCDCTL_CERT
          value: \$(ETCD_CERT_FILE)
        - name: ETCDCTL_KEY
          value: \$(ETCD_KEY_FILE)
      volumeMounts:
        - name: data
          mountPath: /var/lib/etcd
        - name: etc
          mountPath: /etc/etcd
  volumes:
    - name: data
      hostPath:
        path: {{ .Values.etcd.host_data_path }}
    - name: etc
      hostPath:
        path: {{ .Values.etcd.host_etc_path }}
...
EODOC
}

while true; do
    # TODO(mark-burnett) Need to monitor a file(s) when shutting down/starting
    # up so I don't try to take two actions on the node at once.
{{- if .Values.bootstrapping.enabled }}
    if [ -e /bootstrapping/{{ .Values.bootstrapping.filename }} ]; then
        # If the first node is starting, wait for it to become healthy
        end=$(($(date +%s) + {{ .Values.bootstrapping.timeout }}))
        while etcdctl member list | grep $POD_IP; do
            if ETCDCTL_ENDPOINTS=$CLIENT_ENDPOINT etcdctl endpoint health; then
                echo Member appears healthy, removing bootstrap file.
                rm /bootstrapping/{{ .Values.bootstrapping.filename }}
                break
            else
                now=$(date +%s)
                if [ $now -gt $end ]; then
                    echo Member did not start successfully before bootstrap timeout. Deleting and trying again.
                    rm -f $MANIFEST_PATH
                    sleep {{ .Values.anchor.period }}
                    break
                fi
                sleep {{ .Values.anchor.period }}
            fi
        done
    fi

    if [ -e /bootstrapping/{{ .Values.bootstrapping.filename }} ]; then
        # Bootstrap the first node
        copy_certificates ${ETCD_NAME}
        ETCD_INITIAL_CLUSTER=${ETCD_NAME}=$PEER_ENDPOINT
        ETCD_INITIAL_CLUSTER_STATE=new
        create_manifest $ETCD_INITIAL_CLUSTER $ETCD_INITIAL_CLUSTER_STATE

        continue
    fi
{{- end }}

    sleep {{ .Values.anchor.period }}

    if [ -e /tmp/stopped ]; then
        echo Stopping
        break
    fi

    if [ -e /tmp/stopping ]; then
        echo Waiting to stop..
        continue
    fi

    if [ ! -e $MANIFEST_PATH ]; then
        if ! etcdctl member list > /tmp/members; then
            echo Failed to locate existing cluster
            continue
        fi

        if ! grep $PEER_ENDPOINT /tmp/members; then
            if grep -v '\bstarted\b' /tmp/members; then
                echo Cluster does not appear fully online, waiting.
                continue
            fi

            # Add this member to the cluster
            etcdctl member add $HOSTNAME --peer-urls $PEER_ENDPOINT
        fi

        # If needed, drop the file in place
        if [ ! -e FILE ]; then
            # Refresh member list
            etcdctl member list > /tmp/members

            if grep $PEER_ENDPOINT /tmp/members; then
                copy_certificates ${ETCD_NAME}

                ETCD_INITIAL_CLUSTER=$(grep -v $PEER_ENDPOINT /tmp/members \
                    | awk -F ', ' '{ print $3 "=" $4 }' \
                    | tr '\n' ',' \
                    | sed "s;\$;$ETCD_NAME=$PEER_ENDPOINT;")
                ETCD_INITIAL_CLUSTER_STATE=existing

                create_manifest $ETCD_INITIAL_CLUSTER $ETCD_INITIAL_CLUSTER_STATE
            fi
        fi
    fi
done
charts/etcd/templates/bin/_pre_stop.tpl (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/bin/sh

set -x

export PEER_ENDPOINT=https://$POD_IP:{{ .Values.service.peer.target_port }}
export MANIFEST_PATH=/manifests/{{ .Values.service.name }}.yaml

function cleanup_host {
    rm -f $MANIFEST_PATH
    rm -rf /etcd-etc/tls/
{{- if .Values.etcd.cleanup_data }}
    rm -rf /etcd-data/*
{{- end }}
}

# Let the anchor process know it should not try to start the server.
touch /tmp/stopping

while true; do
    if etcdctl member list > /tmp/stop_members; then
        if grep $PEER_ENDPOINT /tmp/stop_members; then
            # Find and remove the member from the cluster.
            MEMBER_ID=$(grep $PEER_ENDPOINT /tmp/stop_members | awk -F ', ' '{ print $1 }')
            etcdctl member remove $MEMBER_ID
        else
            cleanup_host
            touch /tmp/stopped
            exit 0
        fi
    else
        echo Failed to locate existing cluster
    fi

    sleep {{ .Values.anchor.period }}
done
charts/etcd/templates/bin/_readiness.tpl (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/sh

set -ex

export ETCDCTL_ENDPOINTS=https://$POD_IP:{{ .Values.service.client.target_port }}

etcdctl endpoint health
charts/etcd/templates/configmap-bin.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-bin
data:
  etcdctl_anchor: |+
{{ tuple "bin/_etcdctl_anchor.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  pre_stop: |+
{{ tuple "bin/_pre_stop.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  readiness: |+
{{ tuple "bin/_readiness.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
charts/etcd/templates/configmap-certs.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $envAll.Values.service.name }}-certs
data:
  anchor-etcd-client.pem: {{ $envAll.Values.anchor.tls.cert | quote }}
  client-ca.pem: {{ $envAll.Values.tls.client.ca | quote }}
  peer-ca.pem: {{ $envAll.Values.tls.peer.ca | quote }}
{{- range .Values.nodes }}
{{- $node := . }}
  {{ $node.name }}-etcd-client.pem: {{ $node.tls.client.cert | quote }}
  {{ $node.name }}-etcd-peer.pem: {{ $node.tls.peer.cert | quote }}
{{- end }}
...
charts/etcd/templates/daemonset-anchor.yaml (new file, 111 lines)
@@ -0,0 +1,111 @@
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: {{ .Values.service.name }}-anchor
spec:
  selector:
    matchLabels:
      {{ .Values.service.name | quote }}: anchor
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
      labels:
        {{ .Values.service.name | quote }}: anchor
    spec:
      hostNetwork: true
      dnsPolicy: {{ .Values.anchor.dns_policy }}
      nodeSelector:
        {{ .Values.anchor.node_selector.key }}: {{ .Values.anchor.node_selector.value }}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: etcdctl
          image: {{ .Values.images.etcdctl }}
          command:
            - /tmp/etcdctl_anchor
          env:
            - name: ETCD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: ETCDCTL_API
              value: '3'
            - name: ETCDCTL_DIAL_TIMEOUT
              value: 3s
            - name: ETCDCTL_ENDPOINTS
              value: https://{{ .Values.anchor.etcdctl_endpoint }}:{{ .Values.service.client.port }}
            - name: ETCDCTL_CACERT
              value: /etc/etcd/tls/certs/client-ca.pem
            - name: ETCDCTL_CERT
              value: /etc/etcd/tls/certs/anchor-etcd-client.pem
            - name: ETCDCTL_KEY
              value: /etc/etcd/tls/keys/anchor-etcd-client-key.pem
          readinessProbe:
            exec:
              command:
                - /tmp/readiness
            initialDelaySeconds: 15
            periodSeconds: 30
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/pre_stop
          volumeMounts:
            - name: bin
              mountPath: /tmp
{{- if .Values.bootstrapping.enabled }}
            - name: bootstrapping
              mountPath: /bootstrapping
{{- end }}
            - name: certs
              mountPath: /etc/etcd/tls/certs
            - name: etcd-etc
              mountPath: /etcd-etc
{{- if .Values.etcd.cleanup_data }}
            - name: etcd-data
              mountPath: /etcd-data
{{- end }}
            - name: keys
              mountPath: /etc/etcd/tls/keys
            - name: kubelet-manifests
              mountPath: /manifests
      terminationGracePeriodSeconds: {{ .Values.anchor.termination_grace_period }}
      volumes:
        - name: bin
          configMap:
            name: {{ .Values.service.name }}-bin
            defaultMode: 0555
{{- if .Values.bootstrapping.enabled }}
        - name: bootstrapping
          hostPath:
            path: {{ .Values.bootstrapping.host_directory }}
{{- end }}
        - name: certs
          configMap:
            name: {{ .Values.service.name }}-certs
            defaultMode: 0444
{{- if .Values.etcd.cleanup_data }}
        - name: etcd-data
          hostPath:
            path: {{ .Values.etcd.host_data_path }}
{{- end }}
        - name: keys
          secret:
            secretName: {{ .Values.service.name }}-keys
            defaultMode: 0444
        - name: etcd-etc
          hostPath:
            path: {{ .Values.etcd.host_etc_path }}
        - name: kubelet-manifests
          hostPath:
            path: {{ .Values.anchor.kubelet.manifest_path }}
charts/etcd/templates/secret-keys.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
{{- $envAll := . }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ $envAll.Values.service.name }}-keys
type: Opaque
data:
  anchor-etcd-client-key.pem: {{ $envAll.Values.anchor.tls.key | b64enc }}
{{- range .Values.nodes }}
{{- $node := . }}
  {{ $node.name }}-etcd-client-key.pem: {{ $node.tls.client.key | b64enc }}
  {{ $node.name }}-etcd-peer-key.pem: {{ $node.tls.peer.key | b64enc }}
{{- end }}
charts/etcd/templates/service.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
{{- $envAll := . }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $envAll.Values.service.name }}
spec:
  selector:
    {{ $envAll.Values.service.name }}-service: enabled
{{- if $envAll.Values.service.ip }}
  clusterIP: {{ $envAll.Values.service.ip }}
{{- end }}
  ports:
    - name: etcd
      port: {{ $envAll.Values.service.client.port }}
      targetPort: {{ $envAll.Values.service.client.target_port }}
charts/etcd/values.yaml (new file, 60 lines)
@@ -0,0 +1,60 @@
anchor:
  dns_policy: Default
  etcdctl_endpoint: example-etcd
  host_data_path: /var/lib/etcd/example

  kubelet:
    manifest_path: /etc/kubernetes/manifests

  node_selector:
    key: etcd-example
    value: enabled

  termination_grace_period: 3600
  period: 15

  tls:
    cert: placeholder
    key: placeholder

etcd:
  host_etc_path: /etc/etcd-example
  host_data_path: /var/lib/etcd/example
  cleanup_data: true

service:
  name: example-etcd
  ip: null
  client:
    port: 2379
    target_port: 2379
  peer:
    port: 2380
    target_port: 2380

bootstrapping:
  enabled: false
  host_directory: /var/lib/anchor/etcd-example
  filename: bootstrap
  # XXX Can I just generalize to an anchor timeout?
  timeout: 300

tls:
  client:
    ca: placeholder
  peer:
    ca: placeholder

nodes:
  - name: example-0
    tls:
      client:
        cert: placeholder
        key: placeholder
      peer:
        cert: placeholder
        key: placeholder

images:
  etcd: quay.io/coreos/etcd:v3.2.7
  etcdctl: quay.io/coreos/etcd:v3.2.7
charts/proxy/Chart.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
description: A chart for the Kubernetes proxy.
name: proxy
version: 0.1.0
charts/proxy/templates/configmap.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubernetes-proxy
data:
  kubeconfig.yaml: |-
    ---
    apiVersion: v1
    clusters:
      - cluster:
          server: https://{{ .Values.network.kubernetes_netloc }}
          certificate-authority: pki/cluster-ca.pem
        name: kubernetes
    contexts:
      - context:
          cluster: kubernetes
          user: proxy
        name: proxy@kubernetes
    current-context: proxy@kubernetes
    kind: Config
    preferences: {}
    users:
      - name: proxy
        user:
          client-certificate: pki/proxy.pem
          client-key: pki/proxy-key.pem

  cluster-ca.pem: {{ .Values.tls.ca | quote }}
  proxy.pem: {{ .Values.tls.cert | quote }}
charts/proxy/templates/daemonset.yaml (new file, 87 lines)
@@ -0,0 +1,87 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: kubernetes-proxy
  labels:
    component: k8s-proxy
spec:
  template:
    metadata:
      labels:
        tier: node
        component: k8s-proxy
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      dnsPolicy: {{ .Values.dns_policy }}
{{- if .Values.node_selector.key }}
      nodeSelector:
        {{ .Values.node_selector.key }}: {{ .Values.node_selector.value }}
{{- end }}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: proxy
          image: {{ .Values.images.proxy }}
          command:
            - {{ .Values.proxy.command }}
            - --cluster-cidr={{ .Values.network.pod_cidr }}
            - --hostname-override=$(NODE_NAME)
            - --kubeconfig=/etc/kubernetes/proxy/kubeconfig.yaml
            - --proxy-mode=iptables
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          # livenessProbe:
          #   httpGet:
          #     path: /healthz
          #     port: 10249
          #   initialDelaySeconds: 15
          #   periodSeconds: 15
          #   failureThreshold: 3
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - |-
                  set -ex

                  iptables-save | grep 'default/kubernetes:https'
            initialDelaySeconds: 15
            periodSeconds: 15
          volumeMounts:
            - name: proxy-cm
              mountPath: /etc/kubernetes/proxy/kubeconfig.yaml
              subPath: kubeconfig.yaml
            - name: proxy-cm
              mountPath: /etc/kubernetes/proxy/pki/proxy.pem
              subPath: proxy.pem
            - name: proxy-cm
              mountPath: /etc/kubernetes/proxy/pki/cluster-ca.pem
              subPath: cluster-ca.pem
            - name: proxy-secret
              mountPath: /etc/kubernetes/proxy/pki/proxy-key.pem
              subPath: proxy-key.pem
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      volumes:
        - name: proxy-cm
          configMap:
            name: kubernetes-proxy
        - name: proxy-secret
          secret:
            secretName: kubernetes-proxy
charts/proxy/templates/secret.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-proxy
type: Opaque
data:
  proxy-key.pem: {{ .Values.tls.key | b64enc }}
charts/proxy/values.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
dns_policy: Default
name: kubernetes-proxy

tls:
  ca: placeholder
  cert: placeholder
  key: placeholder

proxy:
  command: /proxy

images:
  proxy: gcr.io/google_containers/hyperkube-amd64:v1.8.0

network:
  kubernetes_netloc: 10.96.0.1
  pod_cidr: 10.97.0.0/16

node_selector: {}
@@ -1,5 +1,5 @@
 ---
-apiVersion: rbac.authorization.k8s.io/v1alpha1
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   name: generous-permissions
charts/rbac/values.yaml (new file, 1 line)
@@ -0,0 +1 @@
no: options
charts/scheduler/Chart.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
apiVersion: v1
description: A chart for Kubernetes scheduler.
name: scheduler
version: 0.1.0
charts/scheduler/templates/bin/_anchor.tpl (new file, 68 lines)
@@ -0,0 +1,68 @@
#!/bin/sh

set -x

export MANIFEST_PATH=/host{{ .Values.anchor.kubelet.manifest_path }}/{{ .Values.service.name }}.yaml
export ETC_PATH=/host{{ .Values.scheduler.host_etc_path }}

copy_etc_files() {
    mkdir -p $ETC_PATH
    cp /configmap/* /secret/* $ETC_PATH
}

create_manifest() {
    mkdir -p $(dirname $MANIFEST_PATH)
    cat <<EODOC > $MANIFEST_PATH
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ .Values.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{ .Values.service.name }}-service: enabled
spec:
  hostNetwork: true
  containers:
    - name: scheduler
      image: {{ .Values.images.scheduler }}
      env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
      command:
        - {{ .Values.scheduler.command }}
        - --leader-elect=true
        - --kubeconfig=/etc/kubernetes/scheduler/kubeconfig.yaml
        - --v=5

      volumeMounts:
        - name: etc
          mountPath: /etc/kubernetes/scheduler
  volumes:
    - name: etc
      hostPath:
        path: {{ .Values.scheduler.host_etc_path }}
EODOC
}

cleanup() {
    rm -f $MANIFEST_PATH
    rm -rf $ETC_PATH
}

while true; do
    if [ -e /tmp/stop ]; then
        echo Stopping
        cleanup
        break
    fi

    if [ ! -e $MANIFEST_PATH ]; then
        copy_etc_files
        create_manifest
    fi

    sleep {{ .Values.anchor.period }}
done
charts/scheduler/templates/bin/_pre_stop.tpl (new file, 4 lines)
@@ -0,0 +1,4 @@
set -x

touch /tmp/stop
sleep 10000
charts/scheduler/templates/configmap-bin.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-bin
data:
  anchor: |+
{{ tuple "bin/_anchor.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  pre_stop: |+
{{ tuple "bin/_pre_stop.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
charts/scheduler/templates/configmap-etc.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.service.name }}-etc
data:
  kubeconfig.yaml: |-
    ---
    apiVersion: v1
    clusters:
      - cluster:
          server: https://{{ .Values.network.kubernetes_netloc }}
          certificate-authority: cluster-ca.pem
        name: kubernetes
    contexts:
      - context:
          cluster: kubernetes
          user: scheduler
        name: scheduler@kubernetes
    current-context: scheduler@kubernetes
    kind: Config
    preferences: {}
    users:
      - name: scheduler
        user:
          client-certificate: scheduler.pem
          client-key: scheduler-key.pem

  cluster-ca.pem: {{ .Values.tls.ca | quote }}
  scheduler.pem: {{ .Values.tls.cert | quote }}
charts/scheduler/templates/daemonset.yaml (new file, 69 lines)
@@ -0,0 +1,69 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: {{ .Values.service.name }}-anchor
  labels:
    application: kubernetes
    component: kubernetes-scheduler-anchor
spec:
  selector:
    matchLabels:
      {{ .Values.service.name | quote }}: anchor
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
      labels:
        {{ .Values.service.name | quote }}: anchor
    spec:
      hostNetwork: true
      dnsPolicy: {{ .Values.anchor.dns_policy }}
      nodeSelector:
        {{ .Values.anchor.node_selector.key }}: {{ .Values.anchor.node_selector.value }}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: anchor
          image: {{ .Values.images.anchor }}
          command:
            - /tmp/bin/anchor
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/bin/pre_stop
          volumeMounts:
            - name: bin
              mountPath: /tmp/bin
            - name: etc
              mountPath: /configmap
            - name: host
              mountPath: /host
            - name: secret
              mountPath: /secret
      terminationGracePeriodSeconds: {{ .Values.anchor.termination_grace_period }}
      volumes:
        - name: bin
          configMap:
            name: {{ .Values.service.name }}-bin
            defaultMode: 0555
        - name: etc
          configMap:
            name: {{ .Values.service.name }}-etc
            defaultMode: 0444
        - name: host
          hostPath:
            path: /
        - name: secret
          secret:
            secretName: {{ .Values.service.name }}
            defaultMode: 0444
charts/scheduler/templates/secret.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.service.name }}
type: Opaque
data:
  scheduler-key.pem: {{ .Values.tls.key | b64enc }}
charts/scheduler/values.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
anchor:
  dns_policy: Default
  kubelet:
    manifest_path: /etc/kubernetes/manifests
  node_selector:
    key: kubernetes-scheduler
    value: enabled
  period: 15
  termination_grace_period: 3600

scheduler:
  command: /scheduler
  host_etc_path: /etc/kubernetes/scheduler

service:
  name: kubernetes-scheduler

tls:
  ca: placeholder
  cert: placeholder
  key: placeholder

images:
  anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
  scheduler: gcr.io/google_containers/hyperkube-amd64:v1.8.0

network:
  kubernetes_netloc: 10.96.0.1
cleanup.sh (deleted, 32 lines)
@@ -1,32 +0,0 @@
#!/usr/bin/env bash

set -x

export KUBECONFIG=/etc/kubernetes/admin/kubeconfig.yaml
kubectl drain --force $(hostname)

systemctl stop kubelet
docker rm -fv $(docker ps -aq)

systemctl stop docker

apt-get remove -qq -y dnsmasq ceph-common

systemctl daemon-reload

rm -rf \
    /etc/dnsmasq.d/kubernetes-masters \
    /etc/dnsmasq.d/upstream-dns \
    /etc/docker \
    /etc/kubernetes \
    /etc/systemd/system/docker.service.d \
    /etc/systemd/system/kubelet \
    /opt/cni \
    /usr/local/bin/bootstrap \
    /usr/local/bin/helm \
    /usr/local/bin/kubectl \
    /usr/local/bin/kubelet \
    /var/lib/auxiliary-etcd-0 \
    /var/lib/auxiliary-etcd-1 \
    /var/lib/kube-etcd \
    /var/lib/prom.done
@@ -99,7 +99,9 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+# NOTE(mark-burnett): Currently, we don't have any static files and the
+# non-existence of this directory causes a sphinx exception.
+#html_static_path = ['_static']


 # -- Options for HTMLHelp output ------------------------------------------

@@ -1,165 +0,0 @@
Promenade Configuration
=======================

Promenade is configured using a set of Kubernetes-like YAML documents. Many of
these documents can be automatically derived from a few core configuration
documents or generated automatically (e.g. certificates). All of these
documents can be specified in detail allowing for fine-grained control over
cluster deployment.

Generally, these documents have the following form:

.. code-block:: yaml

    ---
    apiVersion: promenade/v1
    kind: Kind
    metadata:
      compliant: metadata
    spec:
      detailed: data

``apiVersion`` identifies the document as Promenade configuration. Currently
only ``promenade/v1`` is supported.

``kind`` describes the detailed type of document. Valid kinds are:

- ``Certificate`` - An x509 certificate.
- ``CertificateAuthority`` - An x509 certificate authority certificate.
- ``CertificateAuthorityKey`` - The private key for a certificate authority.
- ``CertificateKey`` - The private key for a certificate.
- ``Cluster`` - Cluster configuration containing node host names, IPs & roles.
- ``Etcd`` - Specific configuration for an etcd cluster.
- ``Masters`` - Host names and IPs of master nodes.
- ``Network`` - Configuration details for Kubernetes networking components.
- ``Node`` - Specific configuration for a single host.
- ``PrivateKey`` - A private key, e.g. the ``controller-manager``'s token signing key.
- ``PublicKey`` - A public key, e.g. the key for verifying service account tokens.
- ``Versions`` - Specifies versions of packages and images to be deployed.

``metadata`` is used to select specific documents of a given ``kind``. For
example, the various services must each select their specific ``Certificate``s.
``metadata`` is also used by Drydock to select the configuration files that are
needed for a particular node.

``spec`` contains specific data for each kind of configuration document.

Additionally, documents for [Armada](https://github.com/att-comdev/armada) are
allowed and will be applied after CNI and DNS are deployed.

Generating Configuration from Minimal Input
-------------------------------------------

To construct a complete set of cluster configuration, the minimal inputs are
``Cluster``, ``Network`` and ``Versions`` documents. To see complete examples of
these, please see the [example](example/vagrant-input-config.yaml).

The ``Cluster`` configuration must contain an entry for each host for which
configuration should be generated. Each host must contain an ``ip``, and
optionally ``roles`` and ``additional_labels``. Valid ``roles`` are currently
``genesis`` and ``master``. ``additional_labels`` are Kubernetes labels which will
be added to the node.

Here's an example ``Cluster`` document:

.. code-block:: yaml

    apiVersion: promenade/v1
    kind: Cluster
    metadata:
      name: example
      target: none
    spec:
      nodes:
        n0:
          ip: 192.168.77.10
          roles:
            - master
            - genesis
          additional_labels:
            - beta.kubernetes.io/arch=amd64

The ``Network`` document must contain:

- ``cluster_domain`` - The domain for the cluster, e.g. ``cluster.local``.
- ``cluster_dns`` - The IP of the cluster DNS, e.g. ``10.96.0.10``.
- ``kube_service_ip`` - The IP of the ``kubernetes`` service, e.g. ``10.96.0.1``.
- ``pod_ip_cidr`` - The CIDR from which pod IPs will be assigned, e.g. ``10.97.0.0/16``.
- ``service_ip_cidr`` - The CIDR from which service IPs will be assigned, e.g. ``10.96.0.0/16``.
- ``etcd_service_ip`` - The IP address of the ``etcd`` service, e.g. ``10.96.232.136``.
- ``dns_servers`` - A list of upstream DNS server IPs.

Optionally, proxy settings can be specified here as well. These should all
generally be set together: ``http_proxy``, ``https_proxy``, ``no_proxy``. ``no_proxy``
must include all master IP addresses, and the ``kubernetes`` service name.

Here's an example ``Network`` document:

.. code-block:: yaml

    apiVersion: promenade/v1
    kind: Network
    metadata:
      cluster: example
      name: example
      target: all
    spec:
      cluster_domain: cluster.local
      cluster_dns: 10.96.0.10
      kube_service_ip: 10.96.0.1
      pod_ip_cidr: 10.97.0.0/16
      service_ip_cidr: 10.96.0.0/16
      etcd_service_ip: 10.96.232.136
      dns_servers:
        - 8.8.8.8
        - 8.8.4.4
      http_proxy: http://proxy.example.com:8080
      https_proxy: http://proxy.example.com:8080
      no_proxy: 192.168.77.10,192.168.77.11,192.168.77.12,127.0.0.1,kubernetes,kubernetes.default.svc.cluster.local

The ``Versions`` document must define the Promenade image to be used and the
Docker package version. Currently, only the versions specified for these two
items are respected.

Here's an example ``Versions`` document:

.. code-block:: yaml

    apiVersion: promenade/v1
    kind: Versions
    metadata:
      cluster: example
      name: example
      target: all
    spec:
      images:
        promenade: quay.io/attcomdev/promenade:latest
      packages:
        docker: docker.io=1.12.6-0ubuntu1~16.04.1

Given these documents (see the [example](example/vagrant-input-config.yaml)),
Promenade can derive the remaining configuration and generate certificates and
keys using the following command:

.. code-block:: bash

    mkdir -p configs
    docker run --rm -t \
        -v $(pwd):/target \
        quay.io/attcomdev/promenade:latest \
        promenade -v generate \
        -c /target/example/vagrant-input-config.yaml \
        -o /target/configs

This will generate the following files in the ``configs`` directory:

- ``up.sh`` - A script which will bring up a node to create or join a cluster.
- ``admin-bundle.yaml`` - A collection of generated certificates, private keys
  and core configuration.
- ``complete-bundle.yaml`` - A set of generated documents suitable for upload
  into Drydock for future delivery to nodes to be provisioned to join the
  cluster.

Additionally, a YAML file for each host described in the ``Cluster`` document
will be placed here. These files each contain every document needed for that
particular node to create or join the cluster.
docs/source/configuration/docker.rst (new file, 29 lines)
@@ -0,0 +1,29 @@
Docker
======

Configuration for the docker daemon. This document contains a single `config`
key that directly translates into the contents of the `daemon.json` file
described in `Docker's configuration`_.


Sample Document
---------------

Here is a sample document:

.. code-block:: yaml

    schema: promenade/Docker/v1
    metadata:
      schema: metadata/Document/v1
      name: docker
      layeringDefinition:
        abstract: false
        layer: site
    data:
      config:
        live-restore: true
        storage-driver: overlay2


.. _Docker's configuration: https://docs.docker.com/engine/reference/commandline/dockerd/
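Because the ``config`` mapping is carried into ``daemon.json`` as-is, any other
option the daemon accepts can sit alongside the sample keys. A minimal sketch,
assuming that direct translation (``insecure-registries`` is a standard dockerd
option shown purely for illustration; it is not configured by this change, and
the registry host is hypothetical):

.. code-block:: yaml

    data:
      config:
        live-restore: true
        storage-driver: overlay2
        # illustrative extra daemon option; value is a placeholder
        insecure-registries:
          - registry.example.com:5000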
docs/source/configuration/genesis.rst (new file, 66 lines)
@@ -0,0 +1,66 @@
Genesis
=======

Specific configuration for the genesis process. This document is a strict
superset of the combination of :doc:`kubernetes-node` and :doc:`host-system`,
so only differences are discussed here.


Sample Document
---------------

Here is a complete sample document:

.. code-block:: yaml

    schema: promenade/Genesis/v1
    metadata:
      schema: metadata/Document/v1
      name: genesis
      layeringDefinition:
        abstract: false
        layer: site
    data:
      hostname: n0
      ip: 192.168.77.10
      labels:
        static:
          - calico-etcd=enabled
          - node-role.kubernetes.io/master=
        dynamic:
          - kubernetes-apiserver=enabled
          - kubernetes-controller-manager=enabled
          - kubernetes-etcd=enabled
          - kubernetes-scheduler=enabled
          - promenade-genesis=enabled
          - ucp-control-plane=enabled
      images:
        armada: quay.io/attcomdev/armada:latest
        helm:
          tiller: gcr.io/kubernetes-helm/tiller:v2.5.1
        kubernetes:
          apiserver: gcr.io/google_containers/hyperkube-amd64:v1.8.0
          controller-manager: gcr.io/google_containers/hyperkube-amd64:v1.8.0
          etcd: quay.io/coreos/etcd:v3.0.17
          scheduler: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      files:
        - path: /var/lib/anchor/calico-etcd-bootstrap
          content: ""
          mode: 0644


Bootstrapping Images
--------------------

Bootstrapping images are specified in the top level key ``images``:

.. code-block:: yaml

    armada: <Armada image for bootstrapping>
    helm:
      tiller: <Tiller image for bootstrapping>
    kubernetes:
      apiserver: <API server image for bootstrapping>
      controller-manager: <Controller Manager image for bootstrapping>
      etcd: <etcd image for bootstrapping>
      scheduler: <Scheduler image for bootstrapping>
docs/source/configuration/host-system.rst (new file, 133 lines)
@@ -0,0 +1,133 @@
HostSystem
==========

Sample Document
---------------

.. code-block:: yaml

    schema: promenade/HostSystem/v1
    metadata:
      schema: metadata/Document/v1
      name: host-system
      layeringDefinition:
        abstract: false
        layer: site
    data:
      files:
        - path: /opt/kubernetes/bin/kubelet
          tar_url: https://dl.k8s.io/v1.8.0/kubernetes-node-linux-amd64.tar.gz
          tar_path: kubernetes/node/bin/kubelet
          mode: 0555
      images:
        coredns: coredns/coredns:011
        helm:
          helm: lachlanevenson/k8s-helm:v2.5.1
        kubernetes:
          kubectl: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      packages:
        repositories:
          - deb http://apt.dockerproject.org/repo ubuntu-xenial main
        keys:
          - |-
            -----BEGIN PGP PUBLIC KEY BLOCK-----

            mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
            ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
            mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
            TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
            dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
            X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
            HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
            NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
            hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
            65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
            zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
            tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
            Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
            AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
            Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
            1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
            uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
            0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
            L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
            YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
            7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
            jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
            HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
            MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
            TvBR8Q==
            =Fm3p
            -----END PGP PUBLIC KEY BLOCK-----
        additional:
          - curl
          - jq
        required:
          docker: docker-engine=1.13.1-0~ubuntu-xenial
          socat: socat=1.7.3.1-1


Files
-----

A list of files to be written to the host. Files can be given as precise content or extracted from a tarball specified by url:

.. code-block:: yaml

    - path: /etc/direct-content
      content: |-
        This
        exact
        text
    - path: /etc/from-tar
      tar_url: http://example.com/file
      tar_source: dir/file.txt

Images
------

Core Images
^^^^^^^^^^^

These images are used for essential functionality:

``coredns``
  coredns_ is configured and used for Kubernetes API discovery during
  bootstrapping.

``kubectl``
  Used for label application and validation tasks during bootstrapping.


Convenience Images
^^^^^^^^^^^^^^^^^^

The ``helm`` image is available for convenience.


.. _coredns: https://github.com/coredns/coredns


Packages
--------

Repository Configuration
^^^^^^^^^^^^^^^^^^^^^^^^

Additional APT repositories can be configured using the ``repositories`` and
``keys`` fields of the ``SystemPackages`` document:

``repositories``
  A list of APT source lines to be configured during genesis or join.

``keys``
  A list of public PGP keys that can be used to verify installed packages.


Package Configuration
^^^^^^^^^^^^^^^^^^^^^

The ``required`` key specifies packages that are required for all deployments,
and the ``additional`` key allows arbitrary additional system packages to be
installed. The ``additional`` key is particularly useful for installing
packages such as `ceph-common`.
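For example, a site that needs the Ceph client tools could extend the sample
above as follows (a minimal sketch; the extra package list is illustrative,
while the ``required`` pins are taken from the sample document):

.. code-block:: yaml

    packages:
      additional:
        - ceph-common   # illustrative site-specific addition
        - curl
        - jq
      required:
        docker: docker-engine=1.13.1-0~ubuntu-xenial
        socat: socat=1.7.3.1-1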
docs/source/configuration/index.rst (new file, 26 lines)
@@ -0,0 +1,26 @@
Configuration
=============

Promenade is configured using a set of Deckhand_ compatible configuration
documents and a bootstrapping Armada_ manifest that is responsible for
deploying core components into the cluster.

Details about Promenade-specific documents can be found here:

.. toctree::
    :maxdepth: 2
    :caption: Documents

    docker
    genesis
    host-system
    kubernetes-network
    kubernetes-node


The provided Armada_ manifest will be applied on the genesis node as soon
as it is healthy.


.. _Armada: https://github.com/att-comdev/armada
.. _Deckhand: https://github.com/att-comdev/deckhand
docs/source/configuration/kubernetes-network.rst (new file, 85 lines)
@@ -0,0 +1,85 @@
Kubernetes Network
==================

Configuration for Kubernetes networking during bootstrapping and for the
``kubelet``.


Sample Document
---------------

.. code-block:: yaml

    schema: promenade/KubernetesNetwork/v1
    metadata:
      schema: metadata/Document/v1
      name: kubernetes-network
      layeringDefinition:
        abstract: false
        layer: site
    data:
      dns:
        cluster_domain: cluster.local
        service_ip: 10.96.0.10
        bootstrap_validation_checks:
          - calico-etcd.kube-system.svc.cluster.local
          - kubernetes-etcd.kube-system.svc.cluster.local
          - kubernetes.default.svc.cluster.local
        upstream_servers:
          - 8.8.8.8
          - 8.8.4.4

      kubernetes:
        pod_cidr: 10.97.0.0/16
        service_cidr: 10.96.0.0/16
        service_ip: 10.96.0.1

      etcd:
        service_ip: 10.96.0.2

      hosts_entries:
        - ip: 192.168.77.1
          names:
            - registry

      proxy:
        url: http://proxy.example.com:8080
        additional_no_proxy:
          - 192.168.77.1


DNS
---

The data in the ``dns`` key is used for bootstrapping and ``kubelet``
configuration of cluster and host-level DNS, which is provided by coredns_.

``bootstrap_validation_checks``
  Domain names to resolve during the genesis and join processes for validation.

``cluster_domain``
  The Kubernetes cluster domain. Used by the ``kubelet``.

``service_ip``
  The IP to use for cluster DNS. Used by the ``kubelet``.

``upstream_servers``
  Upstream DNS servers to be configured in `/etc/resolv.conf`.


Kubernetes
----------

The ``kubernetes`` key contains:

``pod_cidr``
  The CIDR from which the Kubernetes Controller Manager assigns pod IPs.

``service_cidr``
  The CIDR from which the Kubernetes Controller Manager assigns service IPs.

``service_ip``
  The in-cluster Kubernetes service IP.


.. _coredns: https://github.com/coredns/coredns
docs/source/configuration/kubernetes-node.rst (new file, 55 lines)
@@ -0,0 +1,55 @@
Kubernetes Node
===============

Configuration for a basic node in the cluster.


Sample Document
---------------

Here is a sample document:

.. code-block:: yaml

    schema: promenade/KubernetesNode/v1
    metadata:
      schema: metadata/Document/v1
      name: n1
      layeringDefinition:
        abstract: false
        layer: site
    data:
      hostname: n1
      ip: 192.168.77.11
      join_ip: 192.168.77.10
      labels:
        static:
          - node-role.kubernetes.io/master=
        dynamic:
          - calico-etcd=enabled
          - kubernetes-apiserver=enabled
          - kubernetes-controller-manager=enabled
          - kubernetes-etcd=enabled
          - kubernetes-scheduler=enabled
          - ucp-control-plane=enabled


Host Information
----------------

Essential host-specific information is specified in this document, including
the ``hostname``, ``ip``, and ``join_ip``.

The ``join_ip`` is used to specify which host should be used when adding a node
to the cluster.


Labels
------

Kubernetes labels can be specified under the ``labels`` key in two ways:

1. Via the ``static`` key, which is a list of labels to be applied immediately
   when the ``kubelet`` process starts.
2. Via the ``dynamic`` key, which is a list of labels to be applied after the
   node is marked as `Ready` by Kubernetes (see the sketch below).
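As a rough sketch of the end state (hypothetical output, not produced verbatim
by this change), once ``n1`` has joined and been marked `Ready`, the Node
object carries both sets of labels as ordinary Kubernetes label pairs:

.. code-block:: yaml

    apiVersion: v1
    kind: Node
    metadata:
      name: n1
      labels:
        node-role.kubernetes.io/master: ""   # static: applied when the kubelet starts
        calico-etcd: "enabled"               # dynamic: applied after the node is Ready
        kubernetes-apiserver: "enabled"      # dynamic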
@@ -7,74 +7,77 @@ Development
 Deployment using Vagrant
 ^^^^^^^^^^^^^^^^^^^^^^^^

 Initial Setup of Vagrant
 ~~~~~~~~~~~~~~~~~~~~~~~~

 Deployment using Vagrant uses KVM instead of Virtualbox due to better
 performance of disk and networking, which both have significant impact on the
 stability of the etcd clusters.

 Make sure you have [Vagrant](https://vagrantup.com) installed, then
-run `./tools/full-vagrant-setup.sh`, which will do the following:
+run `./tools/vagrant/full-vagrant-setup.sh`, which will do the following:

 * Install Vagrant libvirt plugin and its dependencies
 * Install NFS dependencies for Vagrant volume sharing
 * Install [packer](https://packer.io) and build a KVM image for Ubuntu 16.04

-Generate the per-host configuration, certificates and keys to be used:
+Deployment
+~~~~~~~~~~
+A complete set of configuration that works with the `Vagrantfile` in the
+top-level directory is provided in the `example` directory.
+
+To exercise that example, first generate certs and combine the configuration
+into usable parts:

 .. code-block:: bash

-    mkdir configs
-    docker run --rm -t -v $(pwd):/target quay.io/attcomdev/promenade:latest promenade -v generate -c /target/example/vagrant-input-config.yaml -o /target/configs
+    ./tools/build-example.sh

 Start the VMs:

 .. code-block:: bash

-    vagrant up
+    vagrant up --parallel

-Start the genesis node:
+Then bring up the genesis node:

 .. code-block:: bash

-    vagrant ssh n0 -c 'sudo bash /vagrant/configs/up.sh /vagrant/configs/n0.yaml'
+    vagrant ssh n0 -c 'sudo /vagrant/example/scripts/genesis.sh'

-Join the master nodes:
+Join additional master nodes:

 .. code-block:: bash

-    vagrant ssh n1 -c 'sudo bash /vagrant/configs/up.sh /vagrant/configs/n1.yaml'
-    vagrant ssh n2 -c 'sudo bash /vagrant/configs/up.sh /vagrant/configs/n2.yaml'
+    vagrant ssh n1 -c 'sudo /vagrant/example/scripts/join-n1.sh'
+    vagrant ssh n2 -c 'sudo /vagrant/example/scripts/join-n2.sh'

-Join the worker node:
+Re-provision the genesis node as a normal master:

 .. code-block:: bash

-    vagrant ssh n3 -c 'sudo bash /vagrant/configs/up.sh /vagrant/configs/n3.yaml'
+    vagrant ssh n0 -c 'sudo promenade-teardown'
+    vagrant ssh n1 -c 'sudo kubectl delete node n0'
+    vagrant destroy -f n0
+    vagrant up n0
+    vagrant ssh n0 -c 'sudo /vagrant/example/scripts/join-n0.sh'
+
+Join the remaining worker:
+
+.. code-block:: bash
+
+    vagrant ssh n3 -c 'sudo /vagrant/example/scripts/join-n3.sh'


 Building the image
 ^^^^^^^^^^^^^^^^^^

 To build the image directly, you can use the standard Docker build command:

 .. code-block:: bash

     docker build -t promenade:local .


 For development, you may wish to save it and have the `up.sh` script load it:

 .. code-block:: bash

     docker save -o promenade.tar promenade:local


 Then on a node:

 .. code-block:: bash

     PROMENADE_LOAD_IMAGE=/vagrant/promenade.tar bash /vagrant/up.sh /vagrant/path/to/node-config.yaml


 These commands are combined in a convenience script at `tools/dev-build.sh`.

 To build the image from behind a proxy, you can:

@@ -84,8 +87,19 @@ To build the image from behind a proxy, you can:

     docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$http_proxy --build-arg no_proxy=$no_proxy -t promenade:local .


+For convenience, there is a script which builds an image from the current code,
+then uses it to construct scripts for the example:
+
+.. code-block:: bash
+
+    ./tools/dev-build.sh
+
+*NOTE* the ``dev-build.sh`` script puts Promenade in debug mode, which will
+instruct it to use Vagrant's shared directory to source local charts.
+
+
 Using Promenade Behind a Proxy
 ------------------------------

-To use Promenade from behind a proxy, use the proxy settings described in the
-[configuration docs](configuration.md).
+To use Promenade from behind a proxy, use the proxy settings described in
+:doc:`configuration/kubernetes-network`.
@@ -31,4 +31,4 @@ Promenade Configuration Guide
     :maxdepth: 2

     getting-started
-    configuration
+    configuration/index
example/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
certificates.yaml
scripts
example/bootstrap-armada-config.yaml (new file, 958 lines)
@@ -0,0 +1,958 @@
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: cluster-bootstrap
  layeringDefinition:
    abstract: false
    layer: site
data:
  release_prefix: ucp
  chart_groups:
    - kubernetes-proxy
    - container-networking
    - dns
    - kubernetes
    - kubernetes-rbac
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-proxy
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Kubernetes proxy
  sequenced: true
  chart_group:
    - kubernetes-proxy
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: container-networking
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Container networking via Calico
  sequenced: true
  chart_group:
    - calico-etcd
    - calico
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: dns
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Cluster DNS
  chart_group:
    - coredns
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-rbac
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Role Based Access Control configuration for Kubernetes
  sequenced: true
  chart_group:
    - kubernetes-rbac
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Kubernetes components
  chart_group:
    - kubernetes-etcd
    - kubernetes-apiserver
    - kubernetes-controller-manager
    - kubernetes-scheduler
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: helm-toolkit
data:
  chart_name: helm-toolkit
  release: helm-toolkit
  namespace: helm-toolkit
  timeout: 600
  upgrade:
    no_hooks: true
  values: {}
  source:
    type: git
    location: https://git.openstack.org/openstack/openstack-helm
    subpath: helm-toolkit
    reference: master
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-proxy
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes
        path: $
      dest:
        path: '$.values.tls.ca'
    -
      src:
        schema: deckhand/Certificate/v1
        name: proxy
        path: $
      dest:
        path: '$.values.tls.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: proxy
        path: $
      dest:
        path: '$.values.tls.key'
data:
  chart_name: proxy
  release: kubernetes-proxy
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    tls:
      ca: placeholder
      cert: placeholder
      key: placeholder
    images:
      proxy: gcr.io/google_containers/hyperkube-amd64:v1.8.0
    network:
      kubernetes_netloc: apiserver.kubernetes.promenade:6443
      pod_cidr: 10.97.0.0/16
  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: proxy
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: calico-etcd
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: calico-etcd
        path: $
      dest:
        path: '$.values.tls.client.ca'
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: calico-etcd-peer
        path: $
      dest:
        path: '$.values.tls.peer.ca'

    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-anchor
        path: $
      dest:
        path: '$.values.anchor.tls.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-anchor
        path: $
      dest:
        path: '$.values.anchor.tls.key'

    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n0
        path: $
      dest:
        path: '$.values.nodes[0].tls.client.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n0
        path: $
      dest:
        path: '$.values.nodes[0].tls.client.key'
    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n0-peer
        path: $
      dest:
        path: '$.values.nodes[0].tls.peer.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n0-peer
        path: $
      dest:
        path: '$.values.nodes[0].tls.peer.key'

    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n1
        path: $
      dest:
        path: '$.values.nodes[1].tls.client.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n1
        path: $
      dest:
        path: '$.values.nodes[1].tls.client.key'
    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n1-peer
        path: $
      dest:
        path: '$.values.nodes[1].tls.peer.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n1-peer
        path: $
      dest:
        path: '$.values.nodes[1].tls.peer.key'

    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n2
        path: $
      dest:
        path: '$.values.nodes[2].tls.client.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n2
        path: $
      dest:
        path: '$.values.nodes[2].tls.client.key'
    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n2-peer
        path: $
      dest:
        path: '$.values.nodes[2].tls.peer.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n2-peer
        path: $
      dest:
        path: '$.values.nodes[2].tls.peer.key'

    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n3
        path: $
      dest:
        path: '$.values.nodes[3].tls.client.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n3
        path: $
      dest:
        path: '$.values.nodes[3].tls.client.key'
    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-n3-peer
        path: $
      dest:
        path: '$.values.nodes[3].tls.peer.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-n3-peer
        path: $
      dest:
        path: '$.values.nodes[3].tls.peer.key'

data:
  chart_name: etcd
  release: calico-etcd
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    anchor:
      etcdctl_endpoint: 10.96.232.136
      node_selector:
        key: calico-etcd
        value: enabled
      tls:
        cert: placeholder
        key: placeholder
    etcd:
      host_data_path: /var/lib/etcd/calico
      host_etc_path: /etc/etcd/calico
    bootstrapping:
      enabled: true
      host_directory: /var/lib/anchor
      filename: calico-etcd-bootstrap
    images:
      etcd: quay.io/coreos/etcd:v3.0.17
      etcdctl: quay.io/coreos/etcd:v3.0.17
    nodes:
      - name: n0
        tls:
          client:
            cert: placeholder
            key: placeholder
          peer:
            cert: placeholder
            key: placeholder
      - name: n1
        tls:
          client:
            cert: placeholder
            key: placeholder
          peer:
            cert: placeholder
            key: placeholder
      - name: n2
        tls:
          client:
            cert: placeholder
            key: placeholder
          peer:
            cert: placeholder
            key: placeholder
      # n3 is here to demonstrate movability of the cluster
      - name: n3
        tls:
          client:
            cert: placeholder
            key: placeholder
          peer:
            cert: placeholder
            key: placeholder
    service:
      name: calico-etcd
      ip: 10.96.232.136
      client:
        port: 6666
        target_port: 6666
      peer:
        port: 6667
        target_port: 6667
    tls:
      client:
        ca: placeholder
      peer:
        ca: placeholder
  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: etcd
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: calico
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: calico-etcd
        path: $
      dest:
        path: '$.values.etcd.tls.ca'
    -
      src:
        schema: deckhand/Certificate/v1
        name: calico-node
        path: $
      dest:
        path: '$.values.etcd.tls.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: calico-node
        path: $
      dest:
        path: '$.values.etcd.tls.key'
data:
  chart_name: calico
  release: calico
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    calico:
      ip_autodetection_method: interface=ens6
      pod_ip_cidr: 10.97.0.0/16
      ctl:
        install_on_host: true
    etcd:
      service:
        ip: 10.96.232.136
        port: 6666
      tls:
        ca: placeholder
        cert: placeholder
        key: placeholder
    images:
      cni: quay.io/calico/cni:v1.11.0
      ctl: quay.io/calico/ctl:v1.6.1
      node: quay.io/calico/node:v2.6.1
      policy_controller: quay.io/calico/kube-controllers:v1.0.0
  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: calico
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: coredns
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes
        path: $
      dest:
        path: '$.values.tls.ca'
    -
      src:
        schema: deckhand/Certificate/v1
        name: coredns
        path: $
      dest:
        path: '$.values.tls.cert'
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: coredns
        path: $
      dest:
        path: '$.values.tls.key'
data:
  chart_name: coredns
  release: coredns
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    coredns:
      cluster_domain: cluster.local
      upstream_nameservers:
        - 8.8.8.8
        - 8.8.4.4

      zones:
        - name: promenade
          services:
            - bind_name: apiserver.kubernetes
              service:
                name: kubernetes-apiserver
                namespace: kube-system
            - bind_name: etcd.kubernetes
              service:
                name: kubernetes-etcd
                namespace: kube-system
            - bind_name: etcd.calico
              service:
                name: calico-etcd
                namespace: kube-system
    images:
      anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      coredns: coredns/coredns:011
    tls:
      ca: placeholder
      cert: placeholder
      key: placeholder
    network:
      kubernetes_netloc: apiserver.kubernetes.promenade:6443
  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: coredns
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-apiserver
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes
        path: $
      dest:
        path: $.values.tls.ca

    -
      src:
        schema: deckhand/Certificate/v1
        name: apiserver
        path: $
      dest:
        path: $.values.tls.cert
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: apiserver
        path: $
      dest:
        path: $.values.tls.key
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes-etcd
        path: $
      dest:
        path: $.values.apiserver.etcd.tls.ca
    -
      src:
        schema: deckhand/Certificate/v1
        name: apiserver-etcd
        path: $
      dest:
        path: $.values.apiserver.etcd.tls.cert
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: apiserver-etcd
        path: $
      dest:
        path: $.values.apiserver.etcd.tls.key
    -
      src:
        schema: deckhand/PublicKey/v1
        name: service-account
        path: $
      dest:
        path: $.values.service_account.public_key

data:
  chart_name: apiserver
  release: kubernetes-apiserver
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    apiserver:
      etcd:
        endpoints: https://etcd.kubernetes.promenade:2379
        tls:
          ca: placeholder
          cert: placeholder
          key: placeholder
    images:
      anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      apiserver: gcr.io/google_containers/hyperkube-amd64:v1.8.0
    service_account:
      public_key: placeholder
    tls:
      ca: placeholder
      cert: placeholder
      key: placeholder
    network:
      kubernetes_service_ip: 10.96.0.1
      pod_cidr: 10.97.0.0/16
      service_cidr: 10.96.0.0/16

  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: apiserver
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-controller-manager
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes
        path: $
      dest:
        path: $.values.tls.ca

    -
      src:
        schema: deckhand/Certificate/v1
        name: controller-manager
        path: $
      dest:
        path: $.values.tls.cert
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: controller-manager
        path: $
      dest:
        path: $.values.tls.key
    -
      src:
        schema: deckhand/PrivateKey/v1
        name: service-account
        path: $
      dest:
        path: $.values.service_account.private_key

data:
  chart_name: controller_manager
  release: kubernetes-controller-manager
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    images:
      anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      controller_manager: gcr.io/google_containers/hyperkube-amd64:v1.8.0
    service_account:
      private_key: placeholder
    tls:
      ca: placeholder
      cert: placeholder
      key: placeholder
    network:
      kubernetes_netloc: apiserver.kubernetes.promenade:6443
      pod_cidr: 10.97.0.0/16
      service_cidr: 10.96.0.0/16

  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: controller_manager
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-scheduler
  layeringDefinition:
    abstract: false
    layer: site
  substitutions:
    -
      src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes
        path: $
      dest:
        path: $.values.tls.ca
    -
      src:
        schema: deckhand/Certificate/v1
        name: scheduler
        path: $
      dest:
        path: $.values.tls.cert
    -
      src:
        schema: deckhand/CertificateKey/v1
        name: scheduler
        path: $
      dest:
        path: $.values.tls.key

data:
  chart_name: scheduler
  release: kubernetes-scheduler
  namespace: kube-system
  timeout: 600
  upgrade:
    no_hooks: true
  values:
    tls:
      ca: placeholder
      cert: placeholder
      key: placeholder

    network:
      kubernetes_netloc: apiserver.kubernetes.promenade:6443

    images:
      anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      scheduler: gcr.io/google_containers/hyperkube-amd64:v1.8.0

  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: scheduler
  dependencies:
    - helm-toolkit
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: kubernetes-etcd
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
substitutions:
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateAuthority/v1
|
||||
name: kubernetes-etcd
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.tls.client.ca'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateAuthority/v1
|
||||
name: kubernetes-etcd-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.tls.peer.ca'
|
||||
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-anchor
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.anchor.tls.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-anchor
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.anchor.tls.key'
|
||||
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-n0
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[0].tls.client.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-n0
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[0].tls.client.key'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-n0-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[0].tls.peer.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-n0-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[0].tls.peer.key'
|
||||
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-n1
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[1].tls.client.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-n1
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[1].tls.client.key'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-n1-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[1].tls.peer.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-n1-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[1].tls.peer.key'
|
||||
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-n2
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[2].tls.client.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-n2
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[2].tls.client.key'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-n2-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[2].tls.peer.cert'
|
||||
-
|
||||
src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-n2-peer
|
||||
path: $
|
||||
dest:
|
||||
path: '$.values.nodes[2].tls.peer.key'
|
||||
|
||||
data:
|
||||
chart_name: etcd
|
||||
release: kubernetes-etcd
|
||||
namespace: kube-system
|
||||
timeout: 600
|
||||
upgrade:
|
||||
no_hooks: true
|
||||
values:
|
||||
anchor:
|
||||
etcdctl_endpoint: 10.96.0.2
|
||||
node_selector:
|
||||
key: kubernetes-etcd
|
||||
value: enabled
|
||||
tls:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
etcd:
|
||||
host_data_path: /var/lib/etcd/kubernetes
|
||||
host_etc_path: /etc/etcd/kubernetes
|
||||
images:
|
||||
etcd: quay.io/coreos/etcd:v3.0.17
|
||||
etcdctl: quay.io/coreos/etcd:v3.0.17
|
||||
nodes:
|
||||
- name: n0
|
||||
tls:
|
||||
client:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
peer:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
- name: n1
|
||||
tls:
|
||||
client:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
peer:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
- name: n2
|
||||
tls:
|
||||
client:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
peer:
|
||||
cert: placeholder
|
||||
key: placeholder
|
||||
service:
|
||||
name: kubernetes-etcd
|
||||
ip: 10.96.0.2
|
||||
client:
|
||||
port: 2379
|
||||
target_port: 2379
|
||||
peer:
|
||||
port: 2380
|
||||
target_port: 2380
|
||||
tls:
|
||||
client:
|
||||
ca: placeholder
|
||||
peer:
|
||||
ca: placeholder
|
||||
source:
|
||||
type: local
|
||||
location: /etc/genesis/armada/assets/charts
|
||||
subpath: etcd
|
||||
dependencies:
|
||||
- helm-toolkit
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: kubernetes-rbac
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
data:
|
||||
chart_name: rbac
|
||||
release: rbac
|
||||
namespace: kube-system
|
||||
timeout: 600
|
||||
values: {}
|
||||
upgrade:
|
||||
no_hooks: true
|
||||
source:
|
||||
type: local
|
||||
location: /etc/genesis/armada/assets/charts
|
||||
subpath: rbac
|
||||
dependencies: []
|
||||
...
|
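The `placeholder` values in the charts above are not rendered by Helm; they are overwritten during configuration assembly by Deckhand-style substitution. Each entry under `metadata.substitutions` names a source document (`schema` plus `name`), extracts the value at `src.path`, and writes it into this document's `data` at the JSONPath in `dest.path`. Below is a minimal sketch of that mechanism using jsonpath-ng, the library the reworked promenade/config.py later in this diff relies on; the certificate contents are invented for illustration.

import jsonpath_ng

# Stand-in for a deckhand/Certificate/v1 document; the payload is fake.
cert_doc = {
    'schema': 'deckhand/Certificate/v1',
    'metadata': {'name': 'scheduler'},
    'data': '-----BEGIN CERTIFICATE-----\n...',
}

# Stand-in for the scheduler chart document above, with its placeholder.
chart_doc = {'data': {'values': {'tls': {'cert': 'placeholder'}}}}

substitution = {
    'src': {'schema': 'deckhand/Certificate/v1',
            'name': 'scheduler',
            'path': '$'},
    'dest': {'path': '$.values.tls.cert'},
}

# '$' selects the whole data payload of the source document.
src_value = jsonpath_ng.parse(
    substitution['src']['path']).find(cert_doc['data'])[0].value

# Write the extracted value over the placeholder at the destination path.
chart_doc['data'] = jsonpath_ng.parse(
    substitution['dest']['path']).update(chart_doc['data'], src_value)

assert chart_doc['data']['values']['tls']['cert'] == cert_doc['data']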
@@ -1,16 +0,0 @@
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 4096
  },
  "names": [
    {
      "C": "US",
      "L": "Saint Louis",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Missouri"
    }
  ]
}
36 example/genesis-config.yaml Normal file
@@ -0,0 +1,36 @@
---
schema: promenade/Genesis/v1
metadata:
  schema: metadata/Document/v1
  name: genesis
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: n0
  ip: 192.168.77.10
  labels:
    static:
      - calico-etcd=enabled
      - node-role.kubernetes.io/master=
    dynamic:
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - promenade-genesis=enabled
      - ucp-control-plane=enabled
  images:
    armada: quay.io/attcomdev/armada:latest
    helm:
      tiller: gcr.io/kubernetes-helm/tiller:v2.5.1
    kubernetes:
      apiserver: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      controller-manager: gcr.io/google_containers/hyperkube-amd64:v1.8.0
      etcd: quay.io/coreos/etcd:v3.0.17
      scheduler: gcr.io/google_containers/hyperkube-amd64:v1.8.0
  files:
    - path: /var/lib/anchor/calico-etcd-bootstrap
      content: ""
      mode: 0644
...
82 example/joining-host-config.yaml Normal file
@@ -0,0 +1,82 @@
---
schema: promenade/KubernetesNode/v1
metadata:
  schema: metadata/Document/v1
  name: n0
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: n0
  ip: 192.168.77.10
  join_ip: 192.168.77.11
  labels:
    static:
      - node-role.kubernetes.io/master=
    dynamic:
      - calico-etcd=enabled
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - ucp-control-plane=enabled
---
schema: promenade/KubernetesNode/v1
metadata:
  schema: metadata/Document/v1
  name: n1
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: n1
  ip: 192.168.77.11
  join_ip: 192.168.77.10
  labels:
    static:
      - node-role.kubernetes.io/master=
    dynamic:
      - calico-etcd=enabled
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - ucp-control-plane=enabled
---
schema: promenade/KubernetesNode/v1
metadata:
  schema: metadata/Document/v1
  name: n2
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: n2
  ip: 192.168.77.12
  join_ip: 192.168.77.10
  labels:
    static:
      - node-role.kubernetes.io/master=
    dynamic:
      - calico-etcd=enabled
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - ucp-control-plane=enabled
---
schema: promenade/KubernetesNode/v1
metadata:
  schema: metadata/Document/v1
  name: n3
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: n3
  ip: 192.168.77.13
  join_ip: 192.168.77.11
  labels:
    dynamic:
      - ucp-control-plane=enabled
...
112 example/site-config.yaml Normal file
@@ -0,0 +1,112 @@
---
schema: promenade/KubernetesNetwork/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-network
  layeringDefinition:
    abstract: false
    layer: site
data:
  dns:
    cluster_domain: cluster.local
    service_ip: 10.96.0.10
    bootstrap_validation_checks:
      - calico-etcd.kube-system.svc.cluster.local
      - kubernetes-etcd.kube-system.svc.cluster.local
      - kubernetes.default.svc.cluster.local
    upstream_servers:
      - 8.8.8.8
      - 8.8.4.4

  kubernetes:
    pod_cidr: 10.97.0.0/16
    service_cidr: 10.96.0.0/16
    service_ip: 10.96.0.1

  etcd:
    service_ip: 10.96.0.2

  hosts_entries:
    - ip: 192.168.77.1
      names:
        - registry

  # proxy:
  #   url: http://proxy.example.com:8080
  #   additional_no_proxy:
  #     - 10.0.1.1
---
schema: promenade/Docker/v1
metadata:
  schema: metadata/Document/v1
  name: docker
  layeringDefinition:
    abstract: false
    layer: site
data:
  config:
    insecure-registries:
      - registry:5000
    live-restore: true
    storage-driver: overlay2
---
schema: promenade/HostSystem/v1
metadata:
  schema: metadata/Document/v1
  name: host-system
  layeringDefinition:
    abstract: false
    layer: site
data:
  files:
    - path: /opt/kubernetes/bin/kubelet
      tar_url: https://dl.k8s.io/v1.8.0/kubernetes-node-linux-amd64.tar.gz
      tar_path: kubernetes/node/bin/kubelet
      mode: 0555
  images:
    coredns: coredns/coredns:011
    helm:
      helm: lachlanevenson/k8s-helm:v2.5.1
    kubernetes:
      kubectl: gcr.io/google_containers/hyperkube-amd64:v1.8.0
  packages:
    repositories:
      - deb http://apt.dockerproject.org/repo ubuntu-xenial main
    keys:
      - |-
        -----BEGIN PGP PUBLIC KEY BLOCK-----

        mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
        ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
        mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
        TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
        dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
        X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
        HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
        NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
        hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
        65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
        zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
        tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
        Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
        AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
        Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
        1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
        uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
        0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
        L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
        YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
        7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
        jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
        HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
        MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
        TvBR8Q==
        =Fm3p
        -----END PGP PUBLIC KEY BLOCK-----
    additional:
      - curl
      - jq
    required:
      docker: docker-engine=1.13.1-0~ubuntu-xenial
      socat: socat=1.7.3.1-1
...
@@ -1,123 +0,0 @@
---
apiVersion: promenade/v1
kind: Cluster
metadata:
  name: example
  target: none
spec:
  nodes:
    n0:
      ip: 192.168.77.10
      roles:
        - master
        - genesis
      additional_labels:
        - beta.kubernetes.io/arch=amd64
    n1:
      ip: 192.168.77.11
      roles:
        - master
      additional_labels:
        - beta.kubernetes.io/arch=amd64
    n2:
      ip: 192.168.77.12
      roles:
        - master
      additional_labels:
        - beta.kubernetes.io/arch=amd64
    n3:
      ip: 192.168.77.13
      roles:
        - worker
      additional_labels:
        - beta.kubernetes.io/arch=amd64
---
apiVersion: promenade/v1
kind: Network
metadata:
  cluster: example
  name: example
  target: all
spec:
  cluster_domain: cluster.local
  cluster_dns: 10.96.0.10
  kube_service_ip: 10.96.0.1
  pod_ip_cidr: 10.97.0.0/16
  service_ip_cidr: 10.96.0.0/16
  calico_etcd_service_ip: 10.96.232.136
  calico_interface: enp0s8
  dns_servers:
    - 8.8.8.8
    - 8.8.4.4
  #http_proxy: http://proxy.example.com:8080
  #https_proxy: https://proxy.example.com:8080
---
apiVersion: promenade/v1
kind: Versions
metadata:
  cluster: example
  name: example
  target: all
spec:
  images:
    armada: quay.io/attcomdev/armada:latest
    calico:
      cni: quay.io/calico/cni:v1.9.1
      etcd: quay.io/coreos/etcd:v3.2.1
      node: quay.io/calico/node:v1.3.0
      policy-controller: quay.io/calico/kube-policy-controller:v0.6.0
    kubernetes:
      apiserver: gcr.io/google_containers/hyperkube-amd64:v1.6.8
      controller-manager: quay.io/attcomdev/kube-controller-manager:v1.6.8
      dns:
        dnsmasq: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
        kubedns: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
        sidecar: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2
      etcd: quay.io/coreos/etcd:v3.2.1
      kubectl: gcr.io/google_containers/hyperkube-amd64:v1.6.8
      proxy: gcr.io/google_containers/hyperkube-amd64:v1.6.8
      scheduler: gcr.io/google_containers/hyperkube-amd64:v1.6.8
    promenade: quay.io/attcomdev/promenade:latest
    tiller: gcr.io/kubernetes-helm/tiller:v2.5.0
  packages:
    docker: docker.io=1.12.6-0ubuntu1~16.04.1
    dnsmasq: dnsmasq=2.75-1ubuntu0.16.04.2
    socat: socat=1.7.3.1-1
    additional_packages:
      - ceph-common=10.2.7-0ubuntu0.16.04.1
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: example-application
data:
  release_prefix: example
  chart_groups:
    - example-application
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: example-application
data:
  description: Just an example
  chart_group:
    - redis
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: redis
data:
  chart_name: redis
  release: example-redis
  namespace: default
  timeout: 600
  values:
    persistence:
      enabled: false
  source:
    type: git
    location: https://github.com/kubernetes/charts.git
    subpath: stable/redis
  dependencies: []
119 promenade/builder.py Normal file
@@ -0,0 +1,119 @@
from . import logging, renderer

import io
import itertools
import os
import requests
import tarfile

__all__ = ['Builder']

LOG = logging.getLogger(__name__)


class Builder:
    def __init__(self, config, *, validators=False):
        self.config = config
        self.validators = validators
        self._file_cache = None

    @property
    def file_cache(self):
        if not self._file_cache:
            self._build_file_cache()
        return self._file_cache

    def _build_file_cache(self):
        # Resolve file specs from HostSystem and Genesis documents into
        # concrete bytes, fetching any tar_url sources on first access.
        self._file_cache = {}
        for file_spec in self._file_specs:
            path = file_spec['path']
            if 'content' in file_spec:
                data = file_spec['content']
            elif 'tar_url' in file_spec:
                data = _fetch_tar_content(
                    url=file_spec['tar_url'], path=file_spec['tar_path'])
            self._file_cache[path] = {
                'path': path,
                'data': data,
                'mode': file_spec['mode'],
            }

    @property
    def _file_specs(self):
        return itertools.chain(
            self.config.get_path('HostSystem:files', []),
            self.config.get_path('Genesis:files', []))

    def build_all(self, *, output_dir):
        self.build_genesis(output_dir=output_dir)
        for node_document in self.config.iterate(
                schema='promenade/KubernetesNode/v1'):
            self.build_node(node_document, output_dir=output_dir)

        if self.validators:
            validate_script = renderer.render_template(
                self.config, template='scripts/validate-cluster.sh')
            _write_script(output_dir, 'validate-cluster.sh', validate_script)

    def build_genesis(self, *, output_dir):
        LOG.info('Building genesis script')
        sub_config = self.config.extract_genesis_config()
        tarball = renderer.build_tarball_from_roles(
            config=sub_config,
            roles=['common', 'genesis'],
            file_specs=self.file_cache.values())

        script = renderer.render_template(
            sub_config,
            template='scripts/genesis.sh',
            context={'tarball': tarball})

        _write_script(output_dir, 'genesis.sh', script)

        if self.validators:
            validate_script = renderer.render_template(
                sub_config, template='scripts/validate-genesis.sh')
            _write_script(output_dir, 'validate-genesis.sh', validate_script)

    def build_node(self, node_document, *, output_dir):
        node_name = node_document['metadata']['name']
        LOG.info('Building script for node %s', node_name)
        sub_config = self.config.extract_node_config(node_name)
        file_spec_paths = [
            f['path'] for f in self.config.get_path('HostSystem:files', [])
        ]
        file_specs = [self.file_cache[p] for p in file_spec_paths]
        tarball = renderer.build_tarball_from_roles(
            config=sub_config, roles=['common', 'join'], file_specs=file_specs)

        script = renderer.render_template(
            sub_config,
            template='scripts/join.sh',
            context={'tarball': tarball})

        _write_script(output_dir, _join_name(node_name), script)

        if self.validators:
            validate_script = renderer.render_template(
                sub_config, template='scripts/validate-join.sh')
            _write_script(output_dir, 'validate-%s.sh' % node_name,
                          validate_script)


def _fetch_tar_content(*, url, path):
    # Download a tarball and return the bytes of a single member file.
    response = requests.get(url)
    response.raise_for_status()
    f = io.BytesIO(response.content)
    tf = tarfile.open(fileobj=f, mode='r')
    buf_reader = tf.extractfile(path)
    return buf_reader.read()


def _join_name(node_name):
    return 'join-%s.sh' % node_name


def _write_script(output_dir, name, script):
    # Scripts are written world-readable and executable (0555).
    path = os.path.join(output_dir, name)
    with open(path, 'w') as f:
        os.fchmod(f.fileno(), 0o555)
        f.write(script)
@@ -1,48 +1,69 @@
-from . import generator, logging, operator
+from . import builder, config, exceptions, generator, logging
 import click
+import os
+import sys

 __all__ = []

 LOG = logging.getLogger(__name__)


 @click.group()
 @click.option('-v', '--verbose', is_flag=True)
 def promenade(*, verbose):
+    if _debug():
+        verbose = True
     logging.setup(verbose=verbose)


-@promenade.command(help='Initialize a new cluster on one node')
-@click.option('-a', '--asset-dir', default='/assets',
-              type=click.Path(exists=True, file_okay=False,
-                              dir_okay=True, resolve_path=True),
-              help='Source path for binaries to deploy.')
-@click.option('-c', '--config-path', type=click.File(),
-              help='Location of cluster configuration data.')
-@click.option('--hostname', help='Current hostname.')
-@click.option('-t', '--target-dir', default='/target',
-              type=click.Path(exists=True, file_okay=False,
-                              dir_okay=True, resolve_path=True),
-              help='Location where templated files will be placed.')
-def up(*, asset_dir, config_path, hostname, target_dir):
-    op = operator.Operator.from_config(config_path=config_path,
-                                       hostname=hostname,
-                                       target_dir=target_dir)
-
-    op.up(asset_dir=asset_dir)
-
-
-@promenade.command(help='Generate certs and keys')
-@click.option('-c', '--config-path', type=click.File(),
-              required=True,
-              help='Location of cluster configuration data.')
-@click.option('-o', '--output-dir', default='.',
-              type=click.Path(exists=True, file_okay=False, dir_okay=True,
-                              resolve_path=True),
-              required=True,
-              help='Location to write complete cluster configuration.')
-def generate(*, config_path, output_dir):
-    g = generator.Generator.from_config(config_path=config_path)
-    g.generate_all(output_dir)
+@promenade.command('build-all', help='Construct all scripts')
+@click.option(
+    '-o',
+    '--output-dir',
+    default='.',
+    type=click.Path(
+        exists=True, file_okay=False, dir_okay=True, resolve_path=True),
+    required=True,
+    help='Location to write complete cluster configuration.')
+@click.option('--validators', is_flag=True, help='Generate validation scripts')
+@click.argument('config_files', nargs=-1, type=click.File('rb'))
+def build_all(*, config_files, output_dir, validators):
+    debug = _debug()
+    try:
+        c = config.Configuration.from_streams(
+            debug=debug, streams=config_files)
+        b = builder.Builder(c, validators=validators)
+        b.build_all(output_dir=output_dir)
+    except exceptions.PromenadeException as e:
+        e.display(debug=debug)
+        sys.exit(e.EXIT_CODE)
+
+
+@promenade.command('generate-certs', help='Generate a certs for a site')
+@click.option(
+    '-o',
+    '--output-dir',
+    type=click.Path(
+        exists=True, file_okay=False, dir_okay=True, resolve_path=True),
+    required=True,
+    help='Location to write *-certificates.yaml')
+@click.option(
+    '--calico-etcd-service-ip',
+    default='10.96.232.136',
+    help='Service IP for calico etcd')
+@click.argument('config_files', nargs=-1, type=click.File('rb'))
+def genereate_certs(*, calico_etcd_service_ip, config_files, output_dir):
+    debug = _debug()
+    try:
+        c = config.Configuration.from_streams(
+            debug=debug, streams=config_files, substitute=False)
+        g = generator.Generator(
+            c, calico_etcd_service_ip=calico_etcd_service_ip)
+        g.generate(output_dir)
+    except exceptions.PromenadeException as e:
+        e.display(debug=debug)
+        sys.exit(e.EXIT_CODE)
+
+
+def _debug():
+    return os.environ.get('PROMENADE_DEBUG', '').lower() in {'1', 'true'}
@@ -1,150 +1,206 @@
-from . import logging
-from operator import attrgetter, itemgetter
-import itertools
+from . import exceptions, logging, validation
+import copy
+import jinja2
+import jsonpath_ng
 import yaml

-__all__ = ['Configuration', 'Document', 'load']
+__all__ = ['Configuration']

 LOG = logging.getLogger(__name__)


-def load(f):
-    return Configuration(list(map(instantiate_document,
-                                  yaml.safe_load_all(f))))
-
-
-def instantiate_document(data):
-    if data.get('schema', '').startswith('armada'):
-        return Document({
-            'apiVersion': 'promenade/v1',
-            'kind': 'ArmadaDocument',
-            'metadata': {
-                'name': data['schema'] + '/' + data['metadata']['name'],
-                'target': 'none',
-            },
-            'spec': data,
-        })
-    else:
-        return Document(data)
-
-
-class Document:
-    KEYS = {
-        'apiVersion',
-        'metadata',
-        'kind',
-        'spec',
-    }
-
-    SUPPORTED_KINDS = {
-        'ArmadaDocument',
-
-        'Certificate',
-        'CertificateAuthority',
-        'CertificateAuthorityKey',
-        'CertificateKey',
-        'Cluster',
-        'Etcd',
-        'Masters',
-        'Network',
-        'Node',
-        'PrivateKey',
-        'PublicKey',
-        'Versions',
-    }
-
-    def __init__(self, data):
-        if set(data.keys()) != self.KEYS:
-            LOG.error('data.keys()=%s expected %s', data.keys(), self.KEYS)
-            raise AssertionError('Did not get expected keys')
-        assert data['apiVersion'] == 'promenade/v1'
-        assert data['kind'] in self.SUPPORTED_KINDS
-        assert data['metadata']['name']
-
-        self.data = data
-
-    @property
-    def kind(self):
-        return self.data['kind']
-
-    @property
-    def name(self):
-        return self.metadata['name']
-
-    @property
-    def alias(self):
-        return self.metadata.get('alias')
-
-    @property
-    def target(self):
-        return self.metadata.get('target')
-
-    @property
-    def metadata(self):
-        return self.data['metadata']
-
-    def __getitem__(self, key):
-        return self.data['spec'][key]
-
-    def get(self, key, default=None):
-        return self.data['spec'].get(key, default)
-
-
 class Configuration:
-    def __init__(self, documents):
-        self.documents = sorted(documents, key=attrgetter('kind', 'target'))
-
-        self.validate()
-
-    def validate(self):
-        identifiers = set()
-        for document in self.documents:
-            identifier = (document.kind, document.name)
-            if identifier in identifiers:
-                LOG.error('Found duplicate document in config: '
-                          'kind=%s name=%s', document.kind, document.name)
-                raise RuntimeError('Duplicate document')
-            else:
-                identifiers.add(identifier)
-
-    def __getitem__(self, key):
-        results = [d for d in self.documents if d.kind == key]
-        if len(results) < 1:
-            raise KeyError
-        elif len(results) > 1:
-            raise KeyError('Too many results.')
-        else:
-            return results[0]
-
-    def get(self, *, kind, alias=None, name=None):
-        for document in self.documents:
-            if (document.kind == kind
-                    and (not alias or document.alias == alias)
-                    and (not name or document.name == name)):
-                return document
-
-    def iterate(self, *, kind=None, target=None):
-        if target:
-            docs = self._iterate_with_target(target)
-        else:
-            docs = self.documents
-
-        for document in docs:
-            if not kind or document.kind == kind:
-                yield document
-
-    def get_armada_documents(self):
-        return [d.data['spec'] for d in self.iterate(kind='ArmadaDocument')]
-
-    def _iterate_with_target(self, target):
-        for document in self.documents:
-            if document.target == target or document.target == 'all':
-                yield document
-
-    def write(self, path):
-        with open(path, 'w') as f:
-            yaml.dump_all(map(attrgetter('data'), self.documents),
-                          default_flow_style=False,
-                          explicit_start=True,
-                          indent=2,
-                          stream=f)
+    def __init__(self, *, documents, debug=False, substitute=True):
+        if substitute:
+            documents = _substitute(documents)
+        self.debug = debug
+        self.documents = documents
+
+    @classmethod
+    def from_streams(cls, *, streams, **kwargs):
+        documents = []
+        for stream in streams:
+            stream_name = getattr(stream, 'name')
+            if stream_name is not None:
+                LOG.info('Loading documents from %s', stream_name)
+            stream_documents = list(yaml.safe_load_all(stream))
+            validation.check_schemas(stream_documents)
+            if stream_name is not None:
+                LOG.info('Successfully validated documents from %s',
+                         stream_name)
+            documents.extend(stream_documents)
+
+        return cls(documents=documents, **kwargs)
+
+    def __getitem__(self, path):
+        value = self.get_path(path)
+        if value:
+            return value
+        else:
+            return jinja2.StrictUndefined('No match found for path %s' % path)
+
+    def get_first(self, *paths):
+        result = self._get_first(*paths)
+        if result:
+            return result
+        else:
+            return jinja2.StrictUndefined(
+                'Nothing found matching paths: %s' % ','.join(paths))
+
+    def get(self, *, kind=None, name=None, schema=None):
+        result = _get(self.documents, kind=kind, schema=schema, name=name)
+        if result:
+            return result['data']
+        else:
+            return jinja2.StrictUndefined(
+                'No document found matching kind=%s schema=%s name=%s' %
+                (kind, schema, name))
+
+    def iterate(self, *, kind=None, schema=None, labels=None):
+        if kind is not None:
+            assert schema is None
+            schema = 'promenade/%s/v1' % kind
+
+        for document in self.documents:
+            if _matches_filter(document, schema=schema, labels=labels):
+                yield document
+
+    def extract_genesis_config(self):
+        LOG.debug('Extracting genesis config.')
+        documents = []
+        for document in self.documents:
+            if document['schema'] != 'promenade/KubernetesNode/v1':
+                documents.append(document)
+            else:
+                LOG.debug('Excluding schema=%s metadata.name=%s',
+                          document['schema'], _mg(document, 'name'))
+        return Configuration(
+            debug=self.debug, documents=documents, substitute=False)
+
+    def extract_node_config(self, name):
+        LOG.debug('Extracting node config for %s.', name)
+        documents = []
+        for document in self.documents:
+            schema = document['schema']
+            if schema == 'promenade/Genesis/v1':
+                LOG.debug('Excluding schema=%s metadata.name=%s', schema,
+                          _mg(document, 'name'))
+                continue
+            elif schema == 'promenade/KubernetesNode/v1' and _mg(
+                    document, 'name') != name:
+                LOG.debug('Excluding schema=%s metadata.name=%s', schema,
+                          _mg(document, 'name'))
+                continue
+            else:
+                documents.append(document)
+        return Configuration(
+            debug=self.debug, documents=documents, substitute=False)
+
+    @property
+    def kubelet_name(self):
+        for document in self.iterate(kind='Genesis'):
+            return 'genesis'
+
+        for document in self.iterate(kind='KubernetesNode'):
+            return document['data']['hostname']
+
+        return jinja2.StrictUndefined(
+            'No Genesis or KubernetesNode found while getting kubelet name')
+
+    def _get_first(self, *paths):
+        for path in paths:
+            value = self.get_path(path)
+            if value:
+                return value
+
+    def get_path(self, path, default=None):
+        kind, jsonpath = path.split(':')
+        document = _get(self.documents, kind=kind)
+        if document:
+            data = _extract(document['data'], jsonpath)
+            if data:
+                return data
+        return default
+
+
+def _matches_filter(document, *, schema, labels):
+    matches = True
+    if schema is not None and not document.get('schema',
+                                               '').startswith(schema):
+        matches = False
+
+    if labels is not None:
+        document_labels = _mg(document, 'labels', [])
+        for key, value in labels.items():
+            if key not in document_labels:
+                matches = False
+            else:
+                if document_labels[key] != value:
+                    matches = False
+
+    return matches
+
+
+def _get(documents, kind=None, schema=None, name=None):
+    if kind is not None:
+        assert schema is None
+        schema = 'promenade/%s/v1' % kind
+
+    for document in documents:
+        if (schema == document.get('schema')
+                and (name is None or name == _mg(document, 'name'))):
+            return document
+
+
+def _substitute(documents):
+    result = []
+
+    for document in documents:
+        dest_schema = document.get('schema')
+        dest_name = _mg(document, 'name')
+        LOG.debug('Checking for substitutions in schema=%s metadata.name=%s',
+                  dest_schema, dest_name)
+        final_doc = copy.deepcopy(document)
+        for substitution in _mg(document, 'substitutions', []):
+            source_schema = substitution['src']['schema']
+            source_name = substitution['src']['name']
+            source_path = substitution['src']['path']
+            dest_path = substitution['dest']['path']
+            LOG.debug('Substituting from schema=%s name=%s src_path=%s '
+                      'into dest_path=%s', source_schema, source_name,
+                      source_path, dest_path)
+            source_document = _get(
+                documents, schema=source_schema, name=source_name)
+            if source_document is None:
+                msg = 'Failed to find source document for substitution. ' \
+                      'dest_schema=%s dest_name=%s ' \
+                      'source_schema=%s source_name=%s' \
+                      % (dest_schema, dest_name, source_schema, source_name)
+                LOG.critical(msg)
+                raise exceptions.ValidationException(msg)
+
+            source_value = _extract(source_document['data'],
+                                    substitution['src']['path'])
+            final_doc['data'] = _replace(final_doc['data'], source_value,
+                                         substitution['dest']['path'])
+
+        result.append(final_doc)
+
+    return result
+
+
+def _extract(document, jsonpath):
+    p = jsonpath_ng.parse(jsonpath)
+    matches = p.find(document)
+    if matches:
+        return matches[0].value
+
+
+def _replace(document, value, jsonpath):
+    p = jsonpath_ng.parse(jsonpath)
+    return p.update(document, value)
+
+
+def _mg(document, field, default=None):
+    return document.get('metadata', {}).get(field, default)
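For reference, the `get_path` lookups used throughout the builder and templates take keys of the form `Kind:jsonpath`, resolved against the document of the corresponding `promenade/Kind/v1` schema. A self-contained sketch of that behavior follows; the document data is invented, and the real class layers substitution and schema validation on top.

import jsonpath_ng

documents = [{
    'schema': 'promenade/KubernetesNetwork/v1',
    'metadata': {'name': 'kubernetes-network'},
    'data': {'dns': {'cluster_domain': 'cluster.local'}},
}]

def get_path(path, default=None):
    # Split 'Kind:jsonpath' and expand the kind to its full schema name.
    kind, jsonpath = path.split(':')
    schema = 'promenade/%s/v1' % kind
    for document in documents:
        if document['schema'] == schema:
            matches = jsonpath_ng.parse(jsonpath).find(document['data'])
            if matches:
                return matches[0].value
    return default

assert get_path('KubernetesNetwork:dns.cluster_domain') == 'cluster.local'
assert get_path('HostSystem:files', default=[]) == []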
21 promenade/exceptions.py Normal file
@@ -0,0 +1,21 @@
import logging

LOG = logging.getLogger(__name__)


class PromenadeException(Exception):
    EXIT_CODE = 1

    def __init__(self, message, *, trace=True):
        self.message = message
        self.trace = trace

    def display(self, debug=False):
        if self.trace or debug:
            LOG.exception(self.message)
        else:
            LOG.error(self.message)


class ValidationException(PromenadeException):
    pass
@@ -1,486 +1,195 @@
-from . import config, logging, pki, renderer
+from . import logging, pki
 import os
+import yaml

 __all__ = ['Generator']

 LOG = logging.getLogger(__name__)


 class Generator:
-    @classmethod
-    def from_config(cls, *, config_path):
-        return cls(input_config=(config.load(config_path)))
-
-    def __init__(self, *, input_config):
-        self.input_config = input_config
-
-        self.validate()
-
-    def validate(self):
-        required_kinds = ['Cluster', 'Network', 'Versions']
-        for required_kind in required_kinds:
-            try:
-                self.input_config[required_kind]
-            except KeyError:
-                LOG.error('Generator requires one "%s" document to function.',
-                          required_kind)
-                raise
-
-        assert self.input_config['Cluster'].metadata['name'] \
-            == self.input_config['Network'].metadata['cluster']
-
-    def generate_additional_scripts(self, output_dir):
-        r = renderer.Renderer(config=self.input_config,
-                              target_dir=output_dir)
-        r.render_generate_files()
-
-    def generate_all(self, output_dir):
-        self.generate_additional_scripts(output_dir)
-
-        cluster = self.input_config['Cluster']
-        network = self.input_config['Network']
-        versions = self.input_config['Versions']
-        armada_documents = list(
-            self.input_config.iterate(kind='ArmadaDocument'))
-
-        cluster_name = cluster.metadata['name']
-        LOG.info('Generating configuration for cluster "%s"', cluster_name)
-        masters = self.construct_masters(cluster_name)
-
-        LOG.info('Generating common PKI for cluster "%s"', cluster_name)
-        keys = pki.PKI(cluster_name)
-        cluster_ca, cluster_ca_key = keys.generate_ca(
-            ca_name='cluster',
-            cert_target='all',
-            key_target='masters')
-        etcd_client_ca, etcd_client_ca_key = keys.generate_ca(
-            ca_name='etcd-client',
-            cert_target='masters',
-            key_target='masters')
-        etcd_peer_ca, etcd_peer_ca_key = keys.generate_ca(
-            ca_name='etcd-peer',
-            cert_target='masters',
-            key_target='masters')
-        calico_etcd_client_ca, calico_etcd_client_ca_key = keys.generate_ca(
-            ca_name='calico-etcd-client',
-            cert_target='masters',
-            key_target='masters')
-        calico_etcd_peer_ca, calico_etcd_peer_ca_key = keys.generate_ca(
-            ca_name='calico-etcd-peer',
-            cert_target='masters',
-            key_target='masters')
-
-        admin_cert, admin_cert_key = keys.generate_certificate(
-            name='admin',
-            ca_name='cluster',
-            groups=['system:masters'],
-            target='masters',
-        )
-
-        sa_pub, sa_priv = keys.generate_keypair(
-            name='service-account',
-            target='masters',
-        )
-
-        config.Configuration([
-            admin_cert,
-            admin_cert_key,
-            calico_etcd_client_ca,
-            calico_etcd_client_ca_key,
-            calico_etcd_peer_ca,
-            calico_etcd_peer_ca_key,
-            cluster_ca,
-            cluster_ca_key,
-            etcd_client_ca,
-            etcd_client_ca_key,
-            etcd_peer_ca,
-            etcd_peer_ca_key,
-            sa_priv,
-            sa_pub,
-        ]).write(os.path.join(output_dir, 'admin-bundle.yaml'))
-
-        complete_configuration = [
-            admin_cert,
-            admin_cert_key,
-            calico_etcd_client_ca,
-            calico_etcd_client_ca_key,
-            calico_etcd_peer_ca,
-            calico_etcd_peer_ca_key,
-            cluster_ca,
-            cluster_ca_key,
-            etcd_client_ca,
-            etcd_client_ca_key,
-            etcd_peer_ca,
-            etcd_peer_ca_key,
-            masters,
-            network,
-            sa_priv,
-            sa_pub,
-            versions,
-        ]
-
-        for hostname, data in cluster['nodes'].items():
-            if 'genesis' in data.get('roles', []):
-                genesis_hostname = hostname
-                break
-
-        for hostname, data in cluster['nodes'].items():
-            LOG.debug('Generating configuration & PKI for hostname=%s',
-                      hostname)
-            node = _construct_node_config(cluster_name, hostname, data)
-
-            kubelet_cert, kubelet_cert_key = keys.generate_certificate(
-                alias='kubelet',
-                name='system:node:%s' % hostname,
-                ca_name='cluster',
-                groups=['system:nodes'],
-                hosts=[
-                    hostname,
-                    data['ip'],
-                ],
-                target=hostname)
-
-            proxy_cert, proxy_cert_key = keys.generate_certificate(
-                alias='proxy',
-                config_name='system:kube-proxy:%s' % hostname,
-                name='system:kube-proxy',
-                ca_name='cluster',
-                hosts=[
-                    hostname,
-                    data['ip'],
-                ],
-                target=hostname)
-
-            complete_configuration.extend([
-                kubelet_cert,
-                kubelet_cert_key,
-                node,
-                proxy_cert,
-                proxy_cert_key,
-            ])
-
-            common_documents = [
-                cluster_ca,
-                kubelet_cert,
-                kubelet_cert_key,
-                masters,
-                network,
-                node,
-                proxy_cert,
-                proxy_cert_key,
-                versions,
-            ]
-            role_specific_documents = []
-
-            if 'master' in data.get('roles', []):
-                role_specific_documents.extend([
-                    admin_cert,
-                    admin_cert_key,
-                    calico_etcd_client_ca,
-                    calico_etcd_peer_ca,
-                    cluster_ca_key,
-                    etcd_client_ca,
-                    etcd_peer_ca,
-                    sa_priv,
-                    sa_pub,
-                ])
-                if 'genesis' not in data.get('roles', []):
-                    etcd_config = _master_etcd_config(
-                        cluster_name, genesis_hostname, hostname, masters)
-                    calico_etcd_config = _master_calico_etcd_config(
-                        cluster_name, genesis_hostname, hostname, masters)
-                    complete_configuration.append(etcd_config)
-                    complete_configuration.append(calico_etcd_config)
-                    role_specific_documents.append(etcd_config)
-                    role_specific_documents.append(calico_etcd_config)
-                master_documents = _master_config(hostname, data,
-                                                  masters, network, keys)
-                complete_configuration.extend(master_documents)
-                role_specific_documents.extend(master_documents)
-
-            if 'genesis' in data.get('roles', []):
-                role_specific_documents.extend(armada_documents)
-                role_specific_documents.extend(_genesis_config(
-                    hostname, data, masters, network, keys))
-                role_specific_documents.append(
-                    _genesis_etcd_config(cluster_name, hostname))
-                role_specific_documents.append(
-                    _genesis_calico_etcd_config(cluster_name, hostname))
-                node.data['spec']['is_genesis'] = True
-
-            c = config.Configuration(
-                common_documents + role_specific_documents)
-            c.write(os.path.join(output_dir, hostname + '.yaml'))
-
-        config.Configuration(complete_configuration).write(
-            os.path.join(output_dir, 'complete-bundle.yaml'))
-
-    def construct_masters(self, cluster_name):
-        masters = []
-        for hostname, data in self.input_config['Cluster']['nodes'].items():
-            if ('master' in data.get('roles', [])
-                    or 'genesis' in data.get('roles', [])):
-                masters.append({'hostname': hostname, 'ip': data['ip']})
-
-        return config.Document({
-            'apiVersion': 'promenade/v1',
-            'kind': 'Masters',
-            'metadata': {
-                'cluster': cluster_name,
-                'name': cluster_name,
-                'target': 'all',
-            },
-            'spec': {
-                'nodes': masters,
-            },
-        })
-
-
-def _master_etcd_config(cluster_name, genesis_hostname, hostname, masters):
-    initial_cluster = ['%s=https://%s:2380' % (m['hostname'],
-                                               m['hostname'])
-                       for m in masters['nodes']]
-    initial_cluster.extend([
-        'auxiliary-etcd-0=https://%s:12380' % genesis_hostname,
-        'auxiliary-etcd-1=https://%s:22380' % genesis_hostname,
-    ])
-    return _etcd_config(cluster_name, alias='kube-etcd',
-                        name='master-kube-etcd:%s' % hostname,
-                        target=hostname,
-                        initial_cluster=initial_cluster,
-                        initial_cluster_state='existing')
-
-
-def _master_calico_etcd_config(cluster_name, genesis_hostname,
-                               hostname, masters):
-    initial_cluster = ['%s=https://%s:6667' % (m['hostname'],
-                                               m['hostname'])
-                       for m in masters['nodes']]
-    initial_cluster.extend([
-        'auxiliary-calico-etcd-0=https://%s:16667' % genesis_hostname,
-        'auxiliary-calico-etcd-1=https://%s:26667' % genesis_hostname,
-    ])
-    return _etcd_config(cluster_name, alias='calico-etcd',
-                        name='master-calico-etcd:%s' % hostname,
-                        target=hostname,
-                        initial_cluster=initial_cluster,
-                        initial_cluster_state='existing')
-
-
-def _genesis_etcd_config(cluster_name, hostname):
-    initial_cluster = [
-        '%s=https://%s:2380' % (hostname, hostname),
-        'auxiliary-etcd-0=https://%s:12380' % hostname,
-        'auxiliary-etcd-1=https://%s:22380' % hostname,
-    ]
-    return _etcd_config(cluster_name, alias='kube-etcd',
-                        name='master-kube-etcd:%s' % hostname,
-                        target=hostname,
-                        initial_cluster=initial_cluster,
-                        initial_cluster_state='new')
-
-
-def _genesis_calico_etcd_config(cluster_name, hostname):
-    initial_cluster = [
-        '%s=https://%s:6667' % (hostname, hostname),
-        'auxiliary-calico-etcd-0=https://%s:16667' % hostname,
-        'auxiliary-calico-etcd-1=https://%s:26667' % hostname,
-    ]
-    return _etcd_config(cluster_name, alias='calico-etcd',
-                        name='master-calico-etcd:%s' % hostname,
-                        target=hostname,
-                        initial_cluster=initial_cluster,
-                        initial_cluster_state='new')
-
-
-def _etcd_config(cluster_name, *, alias, name, target,
-                 initial_cluster, initial_cluster_state):
-    return config.Document({
-        'apiVersion': 'promenade/v1',
-        'kind': 'Etcd',
-        'metadata': {
-            'cluster': cluster_name,
-            'alias': alias,
-            'name': name,
-            'target': target,
-        },
-        'spec': {
-            'initial_cluster': initial_cluster,
-            'initial_cluster_state': initial_cluster_state,
-        },
-    })
-
-
-def _master_config(hostname, host_data, masters, network, keys):
-    kube_domains = [
-        'kubernetes',
-        'kubernetes.default',
-        'kubernetes.default.svc',
-        'kubernetes.default.svc.cluster.local',
-    ]
-    calico_domains = [
-        'calico-etcd',
-        'calico-etcd.kube-system',
-        'calico-etcd.kube-system.svc',
-        'calico-etcd.kube-system.svc.cluster.local',
-        network['calico_etcd_service_ip'],
-    ]
-
-    docs = []
-
-    docs.extend(keys.generate_certificate(
-        alias='etcd-client',
-        name='etcd:client:%s' % hostname,
-        ca_name='etcd-client',
-        hosts=kube_domains + calico_domains + [hostname, host_data['ip']],
-        target=hostname,
-    ))
-    docs.extend(keys.generate_certificate(
-        alias='etcd-apiserver-client',
-        name='etcd:client:apiserver:%s' % hostname,
-        ca_name='etcd-client',
-        hosts=[hostname, host_data['ip']],
-        target=hostname,
-    ))
-
-    docs.extend(keys.generate_certificate(
-        alias='etcd-peer',
-        name='etcd:peer:%s' % hostname,
-        ca_name='etcd-peer',
-        hosts=kube_domains + [hostname, host_data['ip']],
-        target=hostname,
-    ))
-
-    docs.extend(keys.generate_certificate(
-        alias='calico-etcd-client',
-        name='calico-etcd:client:%s' % hostname,
-        ca_name='calico-etcd-client',
-        hosts=kube_domains + calico_domains + [hostname, host_data['ip']],
-        target=hostname,
-    ))
-
-    docs.extend(keys.generate_certificate(
-        alias='calico-etcd-peer',
-        name='calico-etcd:peer:%s' % hostname,
-        ca_name='calico-etcd-peer',
-        hosts=kube_domains + [hostname, host_data['ip']],
-        target=hostname,
-    ))
-
-    docs.extend(keys.generate_certificate(
-        alias='apiserver',
-        name='apiserver:%s' % hostname,
-        ca_name='cluster',
-        hosts=kube_domains + [
-            network['kube_service_ip'],
-            hostname,
-            host_data['ip'],
-        ],
-        target=hostname,
-    ))
-
-    docs.extend(keys.generate_certificate(
-        alias='controller-manager',
-        config_name='system:kube-controller-manager:%s' % hostname,
-        name='system:kube-controller-manager',
-        ca_name='cluster',
-        hosts=[
-            hostname,
-            host_data['ip'],
-        ],
-        target=hostname,
-    ))
-
-    docs.extend(keys.generate_certificate(
-        alias='scheduler',
-        config_name='system:kube-scheduler:%s' % hostname,
-        name='system:kube-scheduler',
-        ca_name='cluster',
-        hosts=[
-            hostname,
-            host_data['ip'],
-        ],
-        target=hostname,
-    ))
-
-    return docs
-
-
-def _genesis_config(hostname, host_data, masters, network, keys):
-    docs = []
-
-    for i in range(2):
-        docs.extend(keys.generate_certificate(
-            name='auxiliary-etcd-%d-client' % i,
-            ca_name='etcd-client',
-            hosts=[hostname, host_data['ip']],
-            target=hostname,
-        ))
-        docs.extend(keys.generate_certificate(
-            name='auxiliary-etcd-%d-peer' % i,
-            ca_name='etcd-peer',
-            hosts=[hostname, host_data['ip']],
-            target=hostname,
-        ))
-        docs.extend(keys.generate_certificate(
-            name='auxiliary-calico-etcd-%d-client' % i,
-            ca_name='calico-etcd-client',
-            hosts=[hostname, host_data['ip']],
-            target=hostname,
-        ))
-        docs.extend(keys.generate_certificate(
-            name='auxiliary-calico-etcd-%d-peer' % i,
-            ca_name='calico-etcd-peer',
-            hosts=[hostname, host_data['ip']],
-            target=hostname,
-        ))
-
-    docs.extend(keys.generate_certificate(
-        alias='calico-etcd-node-client',
-        name='calico-etcd:node',
-        ca_name='calico-etcd-client',
-        target=hostname,
-    ))
-
-    return docs
-
-
-def _construct_node_config(cluster_name, hostname, data):
-    spec = {
-        'hostname': hostname,
-        'ip': data['ip'],
-        'labels': _labels(data.get('roles', []),
-                          data.get('additional_labels', [])),
-        'templates': _templates(data.get('roles', [])),
-    }
-
-    return config.Document({
-        'apiVersion': 'promenade/v1',
-        'kind': 'Node',
-        'metadata': {
-            'cluster': cluster_name,
-            'name': hostname,
-            'target': hostname,
-        },
-        'spec': spec,
-    })
-
-
-ROLE_LABELS = {
-    'genesis': [
-        'promenade=genesis',
-    ],
-    'master': [
-        'node-role.kubernetes.io/master=',
-    ],
-}
-
-
-def _labels(roles, additional_labels):
-    result = set()
-    for role in roles:
-        result.update(ROLE_LABELS.get(role, []))
-    result.update(additional_labels)
-    return sorted(result)
-
-
-def _templates(roles):
-    return ['common'] + roles
+    def __init__(self, config, *, calico_etcd_service_ip):
+        self.config = config
+        self.calico_etcd_service_ip = calico_etcd_service_ip
+        self.keys = pki.PKI()
+        self.documents = []
+
+    @property
+    def cluster_domain(self):
+        return self.config['KubernetesNetwork:dns.cluster_domain']
+
+    def generate(self, output_dir):
+        # Certificate Authorities
+        self.gen('ca', 'kubernetes')
+        self.gen('ca', 'kubernetes-etcd')
+        self.gen('ca', 'kubernetes-etcd-peer')
+        self.gen('ca', 'calico-etcd')
+        self.gen('ca', 'calico-etcd-peer')
+
+        # Certificates for Kubernetes API server
+        self.gen(
+            'certificate',
+            'apiserver',
+            ca='kubernetes',
+            cn='apiserver',
+            hosts=self._service_dns('kubernetes', 'default') + [
+                'localhost', '127.0.0.1', 'apiserver.kubernetes.promenade'
+            ] + [self.config['KubernetesNetwork:kubernetes.service_ip']])
+        self.gen(
+            'certificate',
+            'apiserver-etcd',
+            ca='kubernetes-etcd',
+            cn='apiserver')
+
+        # Certificates for other Kubernetes components
+        self.gen(
+            'certificate',
+            'scheduler',
+            ca='kubernetes',
+            cn='system:kube-scheduler')
+        self.gen(
+            'certificate',
+            'controller-manager',
+            ca='kubernetes',
+            cn='system:kube-controller-manager')
+        self.gen('keypair', 'service-account')
+
+        self.gen_kubelet_certificates()
+
+        self.gen(
+            'certificate', 'proxy', ca='kubernetes', cn='system:kube-proxy')
+
+        # Certificates for kubectl admin
+        self.gen(
+            'certificate',
+            'admin',
+            ca='kubernetes',
+            cn='admin',
+            groups=['system:masters'])
+
+        # Certificates for armada
+        self.gen(
+            'certificate',
+            'armada',
+            ca='kubernetes',
+            cn='armada',
+            groups=['system:masters'])
+
+        # Certificates for coredns
+        self.gen('certificate', 'coredns', ca='kubernetes', cn='coredns')
+
+        # Certificates for Kubernetes's etcd servers
+        self.gen_etcd_certificates(
+            ca='kubernetes-etcd',
+            genesis=True,
+            service_name='kubernetes-etcd',
+            service_namespace='kube-system',
+            service_ip=self.config['KubernetesNetwork:etcd.service_ip'],
+            additional_hosts=['etcd.kubernetes.promenade'])
+
+        # Certificates for Calico's etcd servers
+        self.gen_etcd_certificates(
+            ca='calico-etcd',
+            service_name='calico-etcd',
+            service_namespace='kube-system',
+            service_ip=self.calico_etcd_service_ip,
+            additional_hosts=['etcd.calico.promenade'])
+
+        # Certificates for Calico node
+        self.gen(
+            'certificate', 'calico-node', ca='calico-etcd', cn='calico-node')
+
+        _write(output_dir, self.documents)
+
+    def gen(self, kind, *args, **kwargs):
+        method = getattr(self.keys, 'generate_' + kind)
+        self.documents.extend(method(*args, **kwargs))
+
+    def gen_kubelet_certificates(self):
+        self._gen_single_kubelet(
+            'genesis', node_data=self.config.get(kind='Genesis'))
+        for node_config in self.config.iterate(kind='KubernetesNode'):
+            self._gen_single_kubelet(
+                node_config['data']['hostname'],
+                node_data=node_config['data'])
+
+    def _gen_single_kubelet(self, name, node_data):
+        self.gen(
+            'certificate',
+            'kubelet-%s' % name,
+            ca='kubernetes',
+            cn='system:node:%s' % node_data['hostname'],
+            hosts=[node_data['hostname'], node_data['ip']],
+            groups=['system:nodes'])
+
+    def gen_etcd_certificates(self, *, ca, genesis=False, **service_args):
+        if genesis:
+            self._gen_single_etcd(
+                name='genesis',
+                ca=ca,
+                node_data=self.config.get(kind='Genesis'),
+                **service_args)
+
+        for node_config in self.config.iterate(kind='KubernetesNode'):
+            self._gen_single_etcd(
+                name=node_config['data']['hostname'],
+                ca=ca,
+                node_data=node_config['data'],
+                **service_args)
+
+        self.gen(
+            'certificate',
+            service_args['service_name'] + '-anchor',
+            ca=ca,
+            cn='anchor')
+
+    def _gen_single_etcd(self,
+                         *,
+                         name,
+                         ca,
+                         node_data,
+                         service_name,
+                         service_namespace,
+                         service_ip=None,
+                         additional_hosts=None):
+        member_name = ca + '-' + name
+
+        hosts = [
+            node_data['hostname'],
+            node_data['ip'],
+            'localhost',
+            '127.0.0.1',
+        ] + (additional_hosts or [])
+
+        hosts.extend(self._service_dns(service_name, service_namespace))
+        if service_ip is not None:
+            hosts.append(service_ip)
+
+        self.gen(
+            'certificate', member_name, ca=ca, cn=member_name, hosts=hosts)
+
+        self.gen(
+            'certificate',
+            member_name + '-peer',
+            ca=ca + '-peer',
+            cn=member_name,
+            hosts=hosts)
+
+    def _service_dns(self, name, namespace):
+        return [
+            name,
+            '.'.join([name, namespace]),
+            '.'.join([name, namespace, 'svc']),
+            '.'.join([name, namespace, 'svc', self.cluster_domain]),
+        ]
+
+
+def _write(output_dir, docs):
+    with open(os.path.join(output_dir, 'certificates.yaml'), 'w') as f:
+        # Don't use safe_dump_all so we can block format certificate data.
+        yaml.dump_all(
+            docs,
+            stream=f,
+            default_flow_style=False,
+            explicit_start=True,
+            indent=2)
@@ -3,8 +3,7 @@ from logging import getLogger

 __all__ = ['getLogger', 'setup']


-LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s:%(funcName)s [%(lineno)3d] %(message)s'
+LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s:%(funcName)s [%(lineno)3d] %(message)s'  # noqa


 def setup(*, verbose):