Merge "ipv6 cluster networking support"

Zuul 2019-05-31 22:00:48 +00:00 committed by Gerrit Code Review
commit c3607b574d
39 changed files with 1458 additions and 261 deletions

View File

@@ -288,11 +288,9 @@ start()
fatal_error "Unable to copy $CONFIG_DIR/registry-cert.crt to certificates dir"
fi
# this is the management network for now
REGISTRY_IP=$(get_ip controller)
mkdir -p /etc/docker/certs.d/$REGISTRY_IP:9001/
chmod 700 /etc/docker/certs.d/$REGISTRY_IP:9001/
cp $CONFIG_DIR/registry-cert.crt /etc/docker/certs.d/$REGISTRY_IP:9001/registry-cert.crt
mkdir -p /etc/docker/certs.d/registry.local:9001/
chmod 700 /etc/docker/certs.d/registry.local:9001/
cp $CONFIG_DIR/registry-cert.crt /etc/docker/certs.d/registry.local:9001/registry-cert.crt
if [ $? -ne 0 ]
then
fatal_error "Unable to copy $CONFIG_DIR/registry-cert.crt to docker dir"

View File

@@ -22,7 +22,7 @@ metadata:
name: {{ $classConfig.name }}
provisioner: {{ $provisioner }}
parameters:
monitors: {{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}
monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}"
adminId: {{ or $classConfig.adminId $defaults.adminId}}
adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }}
adminSecretNamespace: {{ $namespace }}

View File

@@ -17,7 +17,7 @@
replace:
path: /etc/barbican/barbican.conf
regexp: "bind_host=.*$"
replace: "bind_host={{ controller_floating_address }}"
replace: "bind_host={{ controller_floating_address | ipwrap }}"
- name: Restart barbican
systemd:

View File

@@ -37,6 +37,10 @@
with_items:
- net.bridge.bridge-nf-call-ip6tables = 1
- net.bridge.bridge-nf-call-iptables = 1
- net.ipv4.ip_forward = 1
- net.ipv4.conf.default.rp_filter = 0
- net.ipv4.conf.all.rp_filter = 0
- net.ipv6.conf.all.forwarding = 1
- block:
- block:
@@ -108,6 +112,15 @@
group: root
mode: 0644
- name: Set kubelet node configuration
set_fact:
node_ip: "{{ controller_0_cluster_host }}"
- name: Create kubelet override config file
template:
src: "kubelet.conf.j2"
dest: /etc/sysconfig/kubelet
- name: Enable kubelet
systemd:
name: kubelet
@@ -125,15 +138,16 @@
warn: false
with_items:
- "sed -i -e 's|<%= @apiserver_advertise_address %>|'$CLUSTER_IP'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @etcd_endpoint %>|'http://\"$CLUSTER_IP\":$ETCD_PORT'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @apiserver_loopback_address %>|'$LOOPBACK_IP'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @etcd_endpoint %>|'$ETCD_ENDPOINT'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @service_domain %>|'cluster.local'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @pod_network_cidr %>|'$POD_NETWORK_CIDR'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @service_network_cidr %>|'$SERVICE_NETWORK_CIDR'|g' /etc/kubernetes/kubeadm.yaml"
- "sed -i '/<%- /d' /etc/kubernetes/kubeadm.yaml"
- "sed -i -e 's|<%= @k8s_registry %>|'$K8S_REGISTRY'|g' /etc/kubernetes/kubeadm.yaml"
environment:
CLUSTER_IP: "{{ cluster_floating_address }}"
ETCD_PORT: 2379
LOOPBACK_IP: "{{ '127.0.0.1' if ipv6_addressing == False else '::1' }}"
ETCD_ENDPOINT: "http://{{ cluster_floating_address | ipwrap }}:2379"
POD_NETWORK_CIDR: "{{ cluster_pod_subnet }}"
SERVICE_NETWORK_CIDR: "{{ cluster_service_subnet }}"
K8S_REGISTRY: "{{ k8s_registry }}"
@@ -153,73 +167,40 @@
dest: /etc/profile.d/kubeconfig.sh
remote_src: yes
- name: Create Multus config file
copy:
src: "{{ multus_yaml_template }}"
dest: /etc/kubernetes/multus.yaml
remote_src: yes
- name: Update Multus config file
command: "sed -i -e 's|<%= @docker_registry %>|'$DOCKER_REGISTRY'|g' /etc/kubernetes/multus.yaml"
args:
warn: false
environment:
DOCKER_REGISTRY: "{{ docker_registry }}"
- name: Activate Multus Networking
command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/multus.yaml"
- name: Set Calico cluster configuration
set_fact:
cluster_network_ipv4: "{{ cluster_pod_subnet | ipv4 }}"
cluster_network_ipv6: "{{ cluster_pod_subnet | ipv6 }}"
# Configure calico networking using the Kubernetes API datastore.
- name: Create Calico config file
copy:
src: "{{ calico_yaml_template }}"
template:
src: "calico-cni.yaml.j2"
dest: /etc/kubernetes/calico.yaml
remote_src: yes
- name: Update Calico config files with networking info
command: "{{ item }}"
args:
warn: false
with_items:
- "sed -i -e 's|<%= @apiserver_advertise_address %>|'$CLUSTER_IP'|g' /etc/kubernetes/calico.yaml"
- "sed -i -e 's|<%= @pod_network_cidr %>|'$POD_NETWORK_CIDR'|g' /etc/kubernetes/calico.yaml"
- "sed -i -e 's|<%= @quay_registry %>|'$QUAY_REGISTRY'|g' /etc/kubernetes/calico.yaml"
environment:
CLUSTER_IP: "{{ cluster_floating_address }}"
POD_NETWORK_CIDR: "{{ cluster_pod_subnet }}"
QUAY_REGISTRY: "{{ quay_registry }}"
- name: Activate Calico Networking
command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml"
- name: Create SRIOV Networking config file
copy:
src: "{{ sriov_cni_yaml_template }}"
dest: /etc/kubernetes/sriov-cni.yaml
remote_src: yes
- name: Create Multus config file
template:
src: "multus-cni.yaml.j2"
dest: /etc/kubernetes/multus.yaml
- name: Update SRIOV Networking config file
command: "sed -i -e 's|<%= @docker_registry %>|'$DOCKER_REGISTRY'|g' /etc/kubernetes/sriov-cni.yaml"
args:
warn: false
environment:
DOCKER_REGISTRY: "{{ docker_registry }}"
- name: Activate Multus Networking
command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/multus.yaml"
- name: Create SRIOV Networking config file
template:
src: "sriov-cni.yaml.j2"
dest: /etc/kubernetes/sriov-cni.yaml
- name: Activate SRIOV Networking
command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriov-cni.yaml"
- name: Create SRIOV device plugin config file
copy:
src: "{{ sriovdp_daemonset_yaml_template }}"
template:
src: "sriov-plugin.yaml.j2"
dest: /etc/kubernetes/sriovdp-daemonset.yaml
remote_src: yes
- name: Update SRIOV device plugin config file
command: "sed -i -e 's|<%= @docker_registry %>|'$DOCKER_REGISTRY'|g' /etc/kubernetes/sriovdp-daemonset.yaml"
args:
warn: false
environment:
DOCKER_REGISTRY: "{{ docker_registry }}"
- name: Activate SRIOV device plugin
command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriovdp-daemonset.yaml"

View File

@@ -48,6 +48,7 @@
src: "{{ item }}"
dest: "{{ config_permdir }}"
remote_src: yes
mode: preserve
with_items:
- "{{ registry_cert_key }}"
- "{{ registry_cert_crt }}"
@@ -55,7 +56,7 @@
- name: Create docker certificate directory
file:
path: "{{ docker_cert_dir }}/{{ controller_floating_address }}:9001"
path: "{{ docker_cert_dir }}/registry.local:9001"
state: directory
recurse: yes
mode: 0700
@@ -63,5 +64,5 @@
- name: Copy certificate file to docker certificate directory
copy:
src: "{{ registry_cert_crt }}"
dest: "{{ docker_cert_dir }}/{{ controller_floating_address }}:9001"
dest: "{{ docker_cert_dir }}/registry.local:9001"
remote_src: yes

View File

@@ -0,0 +1,817 @@
---
# Calico Version v3.6
# Based off:
# https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/
# hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
#
# This is the calico configuration file for systems with fewer than 50 nodes.
#
# Notes when upversioning calico:
#
# Refer to configuration instructions here:
# https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/
# calico
#
# It is important to test in a multi-controller environment (i.e. AIO-DX) that
# the pods can be pinged by their endpoint, i.e. a pod running on controller-1
# can be pinged from controller-0, and vice versa.
#
# An additional test (run on controller-0) queries the calico daemon
# health and status:
#
# curl -O -L https://github.com/projectcalico/calicoctl/releases/download/
# v3.6.2/calicoctl
# chmod +x calicoctl
# sudo mv calicoctl /usr/local/bin
# export DATASTORE_TYPE=kubernetes
# sudo calicoctl node status
#
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the Calico backend to use.
calico_backend: "bird"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "{{ "true" if cluster_network_ipv4 else "false" }}",
"assign_ipv6": "{{ "true" if cluster_network_ipv6 else "false" }}"
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: "{{ quay_registry }}/calico/cni:v3.6.2"
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: "{{ quay_registry }}/calico/cni:v3.6.2"
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
containers:
# Runs node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: "{{ quay_registry }}/calico/node:v3.6.2"
env:
# Configure inbound failsafe rules
- name: FELIX_FAILSAFEINBOUNDHOSTPORTS
value: "tcp:22, udp:68, tcp:179"
# Configure output failsafe rules
- name: FELIX_FAILSAFEOUTBOUNDHOSTPORTS
value: "udp:53, udp:67, tcp:179"
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
{% if cluster_network_ipv4 -%}
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "can-reach={{ cluster_floating_address }}"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "{{ cluster_pod_subnet }}"
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "true"
{% else -%}
# Disable IPv4
- name: IP
value: "none"
{% endif -%}
{% if cluster_network_ipv6 -%}
- name: IP6
value: "autodetect"
- name: IP6_AUTODETECTION_METHOD
value: "can-reach={{ cluster_floating_address }}"
# The default IPv6 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV6POOL_CIDR
value: "{{ cluster_pod_subnet }}"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "true"
{% else -%}
# Disable IPv6
- name: IP6
value: "none"
{% endif -%}
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
{% if cluster_network_ipv6 -%}
# Enable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "true"
- name: CALICO_ROUTER_ID
value: "hash"
{% else -%}
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
{% endif -%}
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -bird-ready
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
volumes:
# Used by node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest deploys the Calico node controller.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: "{{ quay_registry }}/calico/kube-controllers:v3.6.2"
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml

View File

@@ -0,0 +1,2 @@
# Overrides config file for kubelet
KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}

View File

@@ -0,0 +1,176 @@
# Multus Version v3.2
# Based on:
# https://github.com/intel/multus-cni/blob/release-v3/images/multus-daemonset.yml
#
# The following modifications have been made:
#
# - The multus CNI configuration file has been explicitly specified to ensure
# it has a lower lexicographic order than the calico CNI configuration file.
#
# - The configMap has been modified to work with Calico rather than Flannel
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
properties:
config:
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"delegates": [
{
"cniVersion": "0.3.0",
"name": "k8s-pod-network",
"type": "calico",
"masterplugin": true,
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": 1500,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "{{ "true" if cluster_network_ipv4 else "false" }}",
"assign_ipv6": "{{ "true" if cluster_network_ipv6 else "false" }}"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
}
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
namespace: kube-system
labels:
tier: node
app: multus
spec:
template:
metadata:
labels:
tier: node
app: multus
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: "{{ docker_registry }}/nfvpe/multus:v3.2"
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- -cex
- |
#!/bin/bash
sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/05-multus.conf > /usr/src/multus-cni/images/05-multus.conf
/entrypoint.sh --multus-conf-file=/usr/src/multus-cni/images/05-multus.conf
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 05-multus.conf

View File

@@ -0,0 +1,45 @@
# SRIOV-CNI Release v1
# Based on:
# https://github.com/intel/sriov-cni/blob/master/images/sriov-cni-daemonset.yaml
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-sriov-cni-ds-amd64
namespace: kube-system
labels:
tier: node
app: sriov-cni
spec:
template:
metadata:
labels:
tier: node
app: sriov-cni
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: kube-sriov-cni
image: "{{ docker_registry }}/starlingx/k8s-cni-sriov:master-centos-stable-latest"
securityContext:
privileged: true
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
volumes:
- name: cnibin
hostPath:
path: /opt/cni/bin

View File

@@ -0,0 +1,68 @@
# SRIOV device CNI plugin
# Based on:
# https://github.com/intel/sriov-cni/blob/master/images/sriov-cni-daemonset.yaml
#
# The following modifications have been made:
#
# - A nodeSelector of 'sriovdp' has been added to ensure the sriov device plugin
# pods only run on appropriately labelled nodes.
# - The config hostPath is explicitly set to 'File'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sriov-device-plugin
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-sriov-device-plugin-amd64
namespace: kube-system
labels:
tier: node
app: sriovdp
spec:
template:
metadata:
labels:
tier: node
app: sriovdp
spec:
nodeSelector:
beta.kubernetes.io/arch: amd64
sriovdp: enabled
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: sriov-device-plugin
containers:
- name: kube-sriovdp
image: "{{ docker_registry }}/starlingx/k8s-plugins-sriov-network-device:master-centos-stable-latest"
args:
- --log-level=10
securityContext:
privileged: false
volumeMounts:
- name: devicesock
mountPath: /var/lib/kubelet/device-plugins/
readOnly: false
- name: sysfs
mountPath: /sys
readOnly: true
- name: config
mountPath: /etc/pcidp/config.json
readOnly: true
volumes:
- name: devicesock
hostPath:
path: /var/lib/kubelet/device-plugins/
- name: sysfs
hostPath:
path: /sys
- name: config
hostPath:
path: /etc/pcidp/config.json
type: File

View File

@@ -6,10 +6,6 @@ target_helm_bind_dir: /www/pages/helm_charts
helm_repo_name_apps: starlingx
helm_repo_name_platform: stx-platform
kube_admin_yaml_template: /usr/share/puppet/modules/platform/templates/kubeadm.yaml.erb
multus_yaml_template: /usr/share/puppet/modules/platform/templates/multus.yaml.erb
calico_yaml_template: /usr/share/puppet/modules/platform/templates/calico.yaml.erb
sriov_cni_yaml_template: /usr/share/puppet/modules/platform/templates/sriov-cni.yaml.erb
sriovdp_daemonset_yaml_template: /usr/share/puppet/modules/platform/templates/sriovdp-daemonset.yaml.erb
kubelet_override_template: /usr/share/puppet/modules/platform/templates/kube-stx-override.conf.erb
kubelet_pmond_template: /usr/share/puppet/modules/platform/templates/kubelet-pmond-conf.erb
lighttpd_conf_template: /usr/share/puppet/modules/openstack/templates/lighttpd.conf.erb

View File

@@ -235,4 +235,6 @@
path: /etc/resolv.conf
regex: "nameserver ::1"
state: absent
when: ipv6_addressing == False
- name: Invalidate name service caching server
command: nscd -i hosts

View File

@@ -81,6 +81,41 @@
failed_when: item.value|ipaddr == False
with_dict: "{{ network_params }}"
- set_fact:
ipv4_addressing: "{{ network_params.management_subnet|ipv4 }}"
ipv6_addressing: "{{ network_params.management_subnet|ipv6 }}"
- name: Validate all network subnets are IPv4
debug:
msg: "All infrastructure and cluster subnets must be the same IP version"
failed_when: item|ipv4 == False
with_items:
- "{{ network_params.management_subnet }}"
- "{{ network_params.cluster_host_subnet }}"
- "{{ network_params.cluster_pod_subnet }}"
- "{{ network_params.cluster_service_subnet }}"
- "{{ network_params.external_oam_subnet }}"
- "{{ network_params.management_multicast_subnet }}"
when: ipv4_addressing != False
- name: Validate all network subnets are IPv6
debug:
msg: "All infrastructure and cluster subnets must be the same IP version"
failed_when: item|ipv6 == False
with_items:
- "{{ network_params.management_subnet }}"
- "{{ network_params.cluster_host_subnet }}"
- "{{ network_params.cluster_pod_subnet }}"
- "{{ network_params.cluster_service_subnet }}"
- "{{ network_params.external_oam_subnet }}"
- "{{ network_params.management_multicast_subnet }}"
when: ipv6_addressing != False
- name: Validate pxeboot subnet is IPv4
debug:
msg: "pxeboot_subnet subnet must always be IPv4"
failed_when: network_params.pxeboot_subnet|ipv4 == False
- name: Fail if cluster pod/service subnet size is too small (minimum size = 65536)
fail:
msg: "Subnet size is too small, must have minimum {{ min_pod_service_num_addresses }} addresses."
@@ -109,7 +144,7 @@
- name: Generate warning if subnet prefix is not typical for Standard systems
debug:
msg: "WARNING: Subnet prefix of less than /24 is not typical. This will affect scaling of the system!"
when: item|ipaddr('prefix')|int > typical_subnet_prefix and system_type == 'Standard'
when: item|ipaddr('prefix')|int < typical_subnet_prefix and system_type == 'Standard'
with_items:
- "{{ network_params.pxeboot_subnet }}"
- "{{ network_params.management_subnet }}"
@@ -117,24 +152,12 @@
- "{{ network_params.external_oam_subnet }}"
- "{{ network_params.management_multicast_subnet }}"
- set_fact:
ipv6_addressing: "{{ network_params.management_subnet|ipv6 }}"
- block:
- name: Fail if IPv6 management on simplex
fail:
msg: "IPv6 management network not supported on simplex configuration."
when: system_mode == 'simplex'
- name: Fail if IPv6 prefix length is too short
fail:
msg: "IPv6 minimum prefix length is {{ minimum_prefix_length }}"
when: network_params.management_subnet|ipaddr('prefix')|int < minimum_ipv6_prefix_length
- name: Update localhost name ip mapping for IPv6
set_fact:
localhost_name_ip_mapping: "::1\tlocalhost\tlocalhost.localdomain localhost6 localhost6.localdomain6"
when: ipv6_addressing != False
- name: Fail if address allocation is misconfigured

View File

@@ -43,10 +43,7 @@ class openstack::keystone (
# to the management address while still being able to authenticate the client
if str2bool($::is_initial_config_primary) {
$enabled = true
$bind_host = $::platform::network::mgmt::params::subnet_version ? {
6 => '[::]',
default => '0.0.0.0',
}
$bind_host = '[::]'
} else {
$enabled = false
$bind_host = $::platform::network::mgmt::params::controller_address_url
@@ -202,7 +199,7 @@ class openstack::keystone::bootstrap(
$keystone_key_repo_path = "${::platform::drbd::cgcs::params::mountpoint}/keystone"
$eng_workers = $::platform::params::eng_workers
$bind_host = '0.0.0.0'
$bind_host = '[::]'
# In the case of a classical Multi-Region deployment, apply the Keystone
# controller configuration for Primary Region ONLY

View File

@@ -6,6 +6,7 @@ define platform::dockerdistribution::write_config (
$registry_readonly = false,
$file_path = '/etc/docker-distribution/registry/runtime_config.yml',
$docker_registry_ip = undef,
$docker_registry_host = undef,
){
file { $file_path:
ensure => present,
@@ -25,6 +26,7 @@ class platform::dockerdistribution::config
include ::platform::docker::params
$docker_registry_ip = $::platform::network::mgmt::params::controller_address
$docker_registry_host = $::platform::network::mgmt::params::controller_address_url
$runtime_config = '/etc/docker-distribution/registry/runtime_config.yml'
$used_config = '/etc/docker-distribution/registry/config.yml'
@@ -52,7 +54,8 @@ class platform::dockerdistribution::config
}
platform::dockerdistribution::write_config { 'runtime_config':
docker_registry_ip => $docker_registry_ip
docker_registry_ip => $docker_registry_ip,
docker_registry_host => $docker_registry_host
}
-> exec { 'use runtime config file':
@@ -60,9 +63,10 @@ class platform::dockerdistribution::config
}
platform::dockerdistribution::write_config { 'readonly_config':
registry_readonly => true,
file_path => '/etc/docker-distribution/registry/readonly_config.yml',
docker_registry_ip => $docker_registry_ip,
registry_readonly => true,
file_path => '/etc/docker-distribution/registry/readonly_config.yml',
docker_registry_ip => $docker_registry_ip,
docker_registry_host => $docker_registry_host
}
file { '/etc/docker-distribution/registry/token_server.conf':
@@ -184,14 +188,14 @@ class platform::dockerdistribution::config
mode => '0700',
}
-> file { "/etc/docker/certs.d/${docker_registry_ip}:9001":
-> file { '/etc/docker/certs.d/registry.local:9001':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { "/etc/docker/certs.d/${docker_registry_ip}:9001/registry-cert.crt":
-> file { '/etc/docker/certs.d/registry.local:9001/registry-cert.crt':
ensure => 'file',
owner => 'root',
group => 'root',

View File

@@ -1,6 +1,8 @@
class platform::kubernetes::params (
$enabled = true,
$node_ip = undef,
$pod_network_cidr = undef,
$pod_network_ipversion = 4,
$service_network_cidr = undef,
$apiserver_advertise_address = undef,
$etcd_endpoint = undef,
@@ -98,6 +100,7 @@ class platform::kubernetes::kubeadm {
include ::platform::docker::params
include ::platform::kubernetes::params
$node_ip = $::platform::kubernetes::params::node_ip
$host_labels = $::platform::kubernetes::params::host_labels
$k8s_reserved_cpus = $::platform::kubernetes::params::k8s_reserved_cpus
$k8s_reserved_mem = $::platform::kubernetes::params::k8s_reserved_mem
@@ -108,7 +111,7 @@ class platform::kubernetes::kubeadm {
if $::platform::docker::params::k8s_registry {
$k8s_registry = $::platform::docker::params::k8s_registry
} else {
$k8s_registry = undef
$k8s_registry = 'k8s.gcr.io'
}
# Configure kubelet hugepage and cpumanager options
@@ -181,11 +184,16 @@ class platform::kubernetes::master::init
include ::platform::params
include ::platform::docker::params
$apiserver_loopback_address = $pod_network_ipversion ? {
4 => '127.0.0.1',
6 => '::1',
}
# This is used for imageRepository in template kubeadm.yaml.erb
if $::platform::docker::params::k8s_registry {
$k8s_registry = $::platform::docker::params::k8s_registry
} else {
$k8s_registry = undef
$k8s_registry = 'k8s.gcr.io'
}
# This is used for calico image in template calico.yaml.erb
@@ -633,16 +641,17 @@ class platform::kubernetes::firewall
$d_mgmt_subnet = "! ${s_mgmt_subnet}"
if $system_mode != 'simplex' {
firewall { '000 kubernetes nat':
table => $table,
chain => $chain,
proto => $transport,
jump => $jump,
dport => $dports,
destination => $d_mgmt_subnet,
source => $s_mgmt_subnet,
tosource => $oam_float_ip,
outiface => $oam_interface,
platform::firewall::rule { 'kubernetes-nat':
service_name => 'kubernetes',
table => $table,
chain => $chain,
proto => $transport,
jump => $jump,
ports => $dports,
host => $s_mgmt_subnet,
destination => $d_mgmt_subnet,
outiface => $oam_interface,
tosource => $oam_float_ip,
}
}
}

View File

@@ -104,6 +104,25 @@ define network_address (
}
}
# Defines a single route resource for an interface.
# If multiple are required in the future, then this will need to
# iterate over a hash to create multiple entries per file.
define network_route6 (
$prefix,
$gateway,
$ifname,
) {
file { "/etc/sysconfig/network-scripts/route6-${ifname}":
ensure => present,
owner => root,
group => root,
mode => '0644',
content => "${prefix} via ${gateway} dev ${ifname}"
}
}
class platform::addresses (
$address_config = {},
) {
@@ -132,8 +151,23 @@ class platform::interfaces (
create_resources('network_config', $network_config, {})
create_resources('network_route', $route_config, {})
create_resources('platform::interfaces::sriov_config', $sriov_config, {})
include ::platform::params
include ::platform::network::mgmt::params
# Add static IPv6 default route since DHCPv6 does not support the router option
if $::personality != 'controller' {
if $::platform::network::mgmt::params::subnet_version == $::platform::params::ipv6 {
network_route6 { 'ipv6 default route':
prefix => 'default',
gateway => $::platform::network::mgmt::params::controller_address,
ifname => $::platform::network::mgmt::params::interface_name
}
}
}
}
class platform::network::apply {
include ::platform::interfaces
include ::platform::addresses
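For illustration, a small Python sketch of the route file rendered by the network_route6 define above (the CentOS network-scripts route6 format is a single "<prefix> via <gateway> dev <ifname>" line, matching the content attribute in the define); the gateway and interface name below are hypothetical:

    def render_route6(prefix, gateway, ifname):
        # Same one-line format as the Puppet content attribute above
        return "%s via %s dev %s" % (prefix, gateway, ifname)

    # DHCPv6 does not advertise a default router, so a static default route
    # is written for non-controller nodes, e.g.:
    with open("/etc/sysconfig/network-scripts/route6-ens801f0", "w") as f:
        f.write(render_route6("default", "fd01::1", "ens801f0"))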
@@ -151,6 +185,10 @@ class platform::network::apply {
-> Network_route <| |>
-> Exec['apply-network-config']
Network_config <| |>
-> Network_route6 <| |>
-> Exec['apply-network-config']
exec {'apply-network-config':
command => 'apply_network_config.sh',
}

View File

@@ -235,11 +235,11 @@ class platform::sm
}
exec { 'Configure Management Interface':
command => "sm-configure interface controller management-interface ${mgmt_ip_multicast} ${management_my_unit_ip} 2222 2223 \"\" 2222 2223",
command => "sm-configure interface controller management-interface \"\" ${management_my_unit_ip} 2222 2223 \"\" 2222 2223",
}
exec { 'Configure Cluster Host Interface':
command => "sm-configure interface controller cluster-host-interface ${cluster_host_ip_multicast} ${cluster_host_my_unit_ip} 2222 2223 \"\" 2222 2223",
command => "sm-configure interface controller cluster-host-interface \"\" ${cluster_host_my_unit_ip} 2222 2223 \"\" 2222 2223",
}
} else {

View File

@@ -1,6 +1,4 @@
class platform::sysctl::params (
$ip_forwarding = false,
$ip_version = $::platform::params::ipv4,
$low_latency = false,
) inherits ::platform::params {}
@@ -8,6 +6,10 @@ class platform::sysctl::params (
class platform::sysctl
inherits ::platform::sysctl::params {
include ::platform::network::mgmt::params
$ip_version = $::platform::network::mgmt::params::subnet_version
# Increase min_free_kbytes to 128 MiB from 88 MiB, helps prevent OOM
sysctl::value { 'vm.min_free_kbytes':
value => '131072'
@@ -20,7 +22,6 @@ class platform::sysctl
# Enable br_netfilter (required to allow setting bridge-nf-call-arptables)
exec { 'modprobe br_netfilter':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'modprobe br_netfilter',
}
@@ -51,6 +52,35 @@ class platform::sysctl
value => '0',
}
}
if $ip_version == $::platform::params::ipv6 {
sysctl::value { 'net.ipv6.conf.all.forwarding':
value => '1'
}
} else {
sysctl::value { 'net.ipv4.ip_forward':
value => '1'
}
sysctl::value { 'net.ipv4.conf.default.rp_filter':
value => '0'
}
sysctl::value { 'net.ipv4.conf.all.rp_filter':
value => '0'
}
# If this manifest is applied without rebooting the controller, as is done
# when config_controller is run, any existing interfaces will not have
# their rp_filter setting changed. This is because the kernel uses a MAX
# of the 'all' setting (which is now 0) and the current setting for the
# interface (which will be 1). When a blade is rebooted, the interfaces
# come up with the new 'default' setting so all is well.
exec { 'Clear rp_filter for existing interfaces':
command => "bash -c 'for f in /proc/sys/net/ipv4/conf/*/rp_filter; do echo 0 > \$f; done'",
}
}
}
@@ -99,40 +129,6 @@ class platform::sysctl::controller
sysctl::value { 'kernel.shmmax':
value => '167772160'
}
if $ip_forwarding {
if $ip_version == $::platform::params::ipv6 {
# sysctl does not support ipv6 rp_filter
sysctl::value { 'net.ipv6.conf.all.forwarding':
value => '1'
}
} else {
sysctl::value { 'net.ipv4.ip_forward':
value => '1'
}
sysctl::value { 'net.ipv4.conf.default.rp_filter':
value => '0'
}
sysctl::value { 'net.ipv4.conf.all.rp_filter':
value => '0'
}
# If this manifest is applied without rebooting the controller, as is done
# when config_controller is run, any existing interfaces will not have
# their rp_filter setting changed. This is because the kernel uses a MAX
# of the 'all' setting (which is now 0) and the current setting for the
# interface (which will be 1). When a blade is rebooted, the interfaces
# come up with the new 'default' setting so all is well.
exec { 'Clear rp_filter for existing interfaces':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => "bash -c 'for f in /proc/sys/net/ipv4/conf/*/rp_filter; do echo 0 > \$f; done'",
}
}
}
}

View File

@@ -21,7 +21,7 @@
# health and status
#
# curl -O -L https://github.com/projectcalico/calicoctl/releases/download/
# v3.6.1/calicoctl
# v3.6.2/calicoctl
# chmod +x calicoctl
# sudo mv calicoctl /usr/local/bin
# export DATASTORE_TYPE=kubernetes
@@ -57,7 +57,17 @@ data:
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
"type": "calico-ipam",
<%- if @pod_network_ipversion == 4 -%>
"assign_ipv4": "true",
<%- else -%>
"assign_ipv4": "false",
<%- end -%>
<%- if @pod_network_ipversion == 6 -%>
"assign_ipv6": "true"
<%- else -%>
"assign_ipv6": "false"
<%- end -%>
},
"policy": {
"type": "k8s"
@@ -530,7 +540,7 @@ spec:
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: <%= @quay_registry %>/calico/cni:v3.6.1
image: "<%= @quay_registry %>/calico/cni:v3.6.2"
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
@@ -550,7 +560,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: <%= @quay_registry %>/calico/cni:v3.6.1
image: "<%= @quay_registry %>/calico/cni:v3.6.2"
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -586,7 +596,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: <%= @quay_registry %>/calico/node:v3.6.1
image: "<%= @quay_registry %>/calico/node:v3.6.2"
env:
# Configure inbound failsafe rules
- name: FELIX_FAILSAFEINBOUNDHOSTPORTS
@@ -615,6 +625,7 @@ spec:
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
<%- if @pod_network_ipversion == 4 -%>
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
@@ -622,26 +633,56 @@ spec:
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "<%= @pod_network_cidr %>"
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "true"
<%- else -%>
- name: IP
value: "none"
<%- end -%>
<%- if @pod_network_ipversion == 6 -%>
- name: IP6
value: "autodetect"
- name: IP6_AUTODETECTION_METHOD
value: "can-reach=<%= @apiserver_advertise_address %>"
# The default IPv6 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV6POOL_CIDR
value: "<%= @pod_network_cidr %>"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "true"
<%- else -%>
- name: IP6
value: "none"
<%- end -%>
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "<%= @pod_network_cidr %>"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
<%- if @pod_network_ipversion == 6 -%>
# Enable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "true"
- name: CALICO_ROUTER_ID
value: "hash"
<%- else -%>
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
<%- end -%>
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
@@ -752,7 +793,7 @@ spec:
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: <%= @quay_registry %>/calico/kube-controllers:v3.6.1
image: "<%= @quay_registry %>/calico/kube-controllers:v3.6.2"
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS

View File

@@ -113,3 +113,7 @@ addn-hosts=<%= @config_path %>/dnsmasq.addn_hosts_dc
<%- if @dns_service_ip != nil -%>
server=/<%= @service_domain %>/<%= @dns_service_ip %>
<%- end -%>
# Local CNAME records
cname=registry.local,controller
cname=registry-token-server.local,controller

View File

@@ -11,7 +11,7 @@ storage:
readonly:
enabled: <%= @registry_readonly %>
http:
addr: <%= @docker_registry_ip %>:9001
addr: "<%= @docker_registry_host %>:9001"
tls:
certificate: /etc/ssl/private/registry-cert.crt
key: /etc/ssl/private/registry-cert.key
@@ -24,7 +24,7 @@ health:
threshold: 3
auth:
token:
realm: https://<%= @docker_registry_ip %>:9002/token/
service: <%= @docker_registry_ip %>:9001
realm: "https://<%= @docker_registry_host %>:9002/token/"
service: "<%= @docker_registry_host %>:9001"
issuer: bird-token-server
rootcertbundle: /etc/ssl/private/registry-cert.crt

View File

@@ -12,7 +12,7 @@ etcd:
- <%= @etcd_endpoint %>
apiServerCertSANs:
- "<%= @apiserver_advertise_address %>"
- "127.0.0.1"
- "<%= @apiserver_loopback_address %>"
networking:
dnsDomain: <%= @service_domain %>
podSubnet: <%= @pod_network_cidr %>
@@ -21,9 +21,7 @@ controllerManagerExtraArgs:
node-monitor-period: "2s"
node-monitor-grace-period: "20s"
pod-eviction-timeout: "30s"
<%- if @k8s_registry -%>
imageRepository: "<%= @k8s_registry %>"
<%- end -%>
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1

View File

@@ -1,2 +1,2 @@
# Overrides config file for kubelet
KUBELET_EXTRA_ARGS=--feature-gates=HugePages=<%= @k8s_hugepage %> <%= @k8s_cpu_manager_opts %>
KUBELET_EXTRA_ARGS=--node-ip=<%= @node_ip %> --feature-gates=HugePages=<%= @k8s_hugepage %> <%= @k8s_cpu_manager_opts %>

View File

@@ -93,7 +93,17 @@ data:
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": 1500,
"ipam": {
"type": "calico-ipam"
"type": "calico-ipam",
<%- if @pod_network_ipversion == 4 -%>
"assign_ipv4": "true",
<%- else -%>
"assign_ipv4": "false",
<%- end -%>
<%- if @pod_network_ipversion == 6 -%>
"assign_ipv6": "true"
<%- else -%>
"assign_ipv6": "false"
<%- end -%>
},
"policy": {
"type": "k8s"

View File

@@ -3,8 +3,9 @@ prompt = no
x509_extensions = v3_req
distinguished_name = dn
[dn]
CN = <%= @docker_registry_ip %>
CN = registry.local
[v3_req]
subjectAltName = @alt_names
[alt_names]
IP = <%= @docker_registry_ip %>
DNS.1 = registry.local
IP.1 = <%= @docker_registry_ip %>

View File

@@ -1,7 +1,7 @@
REGISTRY_TOKEN_SERVER_ADDR=<%= @docker_registry_ip %>:9002
REGISTRY_TOKEN_SERVER_ADDR=<%= @docker_registry_host %>:9002
REGISTRY_TOKEN_SERVER_ISSUER=bird-token-server
REGISTRY_TOKEN_SERVER_KS_ENDPOINT=<%= @registry_ks_endpoint %>
REGISTRY_TOKEN_SERVER_TLSCERT=/etc/ssl/private/registry-cert.crt
REGISTRY_TOKEN_SERVER_TLSKEY=/etc/ssl/private/registry-cert.key
REGISTRY_TOKEN_SERVER_REALM=https://<%= @docker_registry_ip %>:9002/token/
REGISTRY_TOKEN_SERVER_REALM=https://<%= @docker_registry_host %>:9002/token/
REGISTRY_TOKEN_SERVER_KEY=/etc/ssl/private/registry-cert-pkcs1.key

View File

@@ -1321,7 +1321,9 @@ MURANO_CERT_KEY_FILE = os.path.join(CERT_MURANO_DIR, CERT_KEY_FILE)
MURANO_CERT_FILE = os.path.join(CERT_MURANO_DIR, CERT_FILE)
MURANO_CERT_CA_FILE = os.path.join(CERT_MURANO_DIR, CERT_CA_FILE)
DOCKER_REGISTRY_HOST = 'registry.local'
DOCKER_REGISTRY_PORT = '9001'
DOCKER_REGISTRY_SERVER = '%s:%s' % (DOCKER_REGISTRY_HOST, DOCKER_REGISTRY_PORT)
DOCKER_REGISTRY_CERT_FILE = os.path.join(SSL_CERT_DIR, "registry-cert.crt")
DOCKER_REGISTRY_KEY_FILE = os.path.join(SSL_CERT_DIR, "registry-cert.key")
DOCKER_REGISTRY_PKCS1_KEY_FILE = os.path.join(SSL_CERT_DIR,
@@ -1563,7 +1565,6 @@ SRIOVDP_LABEL = 'sriovdp=enabled'
# Default DNS service domain
DEFAULT_DNS_SERVICE_DOMAIN = 'cluster.local'
DEFAULT_DNS_SERVICE_IP = '10.96.0.10'
# Ansible bootstrap
ANSIBLE_BOOTSTRAP_FLAG = os.path.join(tsc.VOLATILE_PATH, ".ansible_bootstrap")

View File

@@ -538,6 +538,18 @@ def get_ip_version(network):
return "IPv4"
def format_url_address(address):
"""Format the URL address according to RFC 2732"""
try:
addr = netaddr.IPAddress(address)
if addr.version == constants.IPV6_FAMILY:
return "[%s]" % address
else:
return str(address)
except netaddr.AddrFormatError:
return address
def convert_to_list_dict(lst, label):
"""Convert a value or list into a list of dicts."""
if not lst:
@@ -1751,11 +1763,8 @@ def get_numa_index_list(obj):
return obj_lists
def _format_ceph_mon_address(ip_address, service_port_mon):
if netaddr.IPAddress(ip_address).version == constants.IPV4_FAMILY:
return '%s:%d' % (ip_address, service_port_mon)
else:
return '[%s]:%d' % (ip_address, service_port_mon)
def format_ceph_mon_address(ip_address, service_port_mon):
return '%s:%d' % (format_url_address(ip_address), service_port_mon)
def get_files_matching(path, pattern):

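A self-contained sketch of the RFC 2732 behavior added above, assuming only the netaddr library (constants.IPV6_FAMILY is replaced with the literal version number here, and the helper names mirror the diff):

    import netaddr

    def format_url_address(address):
        """Wrap IPv6 literals in brackets per RFC 2732; pass everything else through."""
        try:
            addr = netaddr.IPAddress(address)
            return "[%s]" % address if addr.version == 6 else str(address)
        except netaddr.AddrFormatError:
            # Hostnames such as registry.local are returned unchanged
            return address

    assert format_url_address("192.168.204.2") == "192.168.204.2"
    assert format_url_address("fd00::2") == "[fd00::2]"
    assert format_url_address("registry.local") == "registry.local"

    def format_ceph_mon_address(ip_address, service_port_mon):
        # The refactored helper above reduces to a single expression
        return "%s:%d" % (format_url_address(ip_address), service_port_mon)

    assert format_ceph_mon_address("fd00::2", 6789) == "[fd00::2]:6789"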
View File

@@ -8,11 +8,13 @@ import base64
import keyring
import requests
from sysinv.common import constants
from sysinv.common import exception
CERT_PATH = '/etc/ssl/private/registry-cert.crt'
KEYRING_SERVICE = 'CGCS'
REGISTRY_USERNAME = 'admin'
REGISTRY_BASEURL = 'https://%s/v2/' % constants.DOCKER_REGISTRY_SERVER
def get_registry_password():
@@ -76,30 +78,30 @@ def docker_registry_authenticate(www_authenticate):
return auth_headers
def docker_registry_get(path, registry_addr):
def docker_registry_get(path, registry_url=REGISTRY_BASEURL):
# we need to have this header to get the correct digest when giving the tag
headers = {"Accept": "application/vnd.docker.distribution.manifest.v2+json"}
resp = requests.get("%s%s" % (registry_addr, path), verify=CERT_PATH, headers=headers)
resp = requests.get("%s%s" % (registry_url, path), verify=CERT_PATH, headers=headers)
# authenticated registry, need to do auth with token server
if resp.status_code == 401:
auth_headers = docker_registry_authenticate(resp.headers["Www-Authenticate"])
headers.update(auth_headers)
resp = requests.get("%s%s" % (registry_addr, path), verify=CERT_PATH, headers=headers)
resp = requests.get("%s%s" % (registry_url, path), verify=CERT_PATH, headers=headers)
return resp
def docker_registry_delete(path, registry_addr):
def docker_registry_delete(path, registry_url=REGISTRY_BASEURL):
headers = {}
resp = requests.delete("%s%s" % (registry_addr, path), verify=CERT_PATH, headers=headers)
resp = requests.delete("%s%s" % (registry_url, path), verify=CERT_PATH, headers=headers)
# authenticated registry, need to do auth with token server
if resp.status_code == 401:
auth_headers = docker_registry_authenticate(resp.headers["Www-Authenticate"])
headers.update(auth_headers)
resp = requests.delete("%s%s" % (registry_addr, path), verify=CERT_PATH, headers=headers)
resp = requests.delete("%s%s" % (registry_url, path), verify=CERT_PATH, headers=headers)
return resp

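A hypothetical usage sketch of the functions above: callers no longer need to resolve the management floating IP, since registry_url now defaults to REGISTRY_BASEURL ('https://registry.local:9001/v2/'); the image path below is illustrative only:

    # List the tags held for an image in the local registry
    # (Docker Registry HTTP API v2: GET /v2/<name>/tags/list)
    resp = docker_registry_get("docker.io/mariadb/tags/list")
    if resp.status_code == 200:
        print(resp.json().get("tags"))

    # An explicit registry_url can still be passed when needed
    resp = docker_registry_delete("docker.io/mariadb/manifests/<digest>",
                                  registry_url=REGISTRY_BASEURL)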
View File

@@ -326,11 +326,10 @@ class AppOperator(object):
the image tags in the manifest file. Intended for system app.
The image tagging conversion (local docker registry address prepended):
${LOCAL_DOCKER_REGISTRY_IP}:${REGISTRY_PORT}/<image-name>
(i.e. 192.168.204.2:9001/docker.io/mariadb:10.2.13)
${LOCAL_REGISTRY_SERVER}:${REGISTRY_PORT}/<image-name>
(i.e. registry.local:9001/docker.io/mariadb:10.2.13)
"""
local_registry_server = self._docker.get_local_docker_registry_server()
manifest_image_tags_updated = False
image_tags = []
@@ -389,13 +388,15 @@ class AppOperator(object):
images_manifest.update({key: images_charts[key]})
if not re.match(r'^.+:.+/', images_manifest[key]):
images_manifest.update(
{key: '{}/{}'.format(local_registry_server, images_manifest[key])})
{key: '{}/{}'.format(constants.DOCKER_REGISTRY_SERVER,
images_manifest[key])})
chart_image_tags_updated = True
image_tags.append(images_manifest[key])
else:
if not re.match(r'^.+:.+/', images_overrides[key]):
images_overrides.update(
{key: '{}/{}'.format(local_registry_server, images_overrides[key])})
{key: '{}/{}'.format(constants.DOCKER_REGISTRY_SERVER,
images_overrides[key])})
overrides_image_tags_updated = True
image_tags.append(images_overrides[key])
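A minimal sketch of the prefixing rule used in this hunk, under the assumption that the r'^.+:.+/' pattern is meant to detect an existing '<host>:<port>/' registry prefix; DOCKER_REGISTRY_SERVER mirrors the constant introduced earlier in this commit:

    import re

    DOCKER_REGISTRY_SERVER = "registry.local:9001"

    def localize_image_tag(tag):
        # Tags that already carry a host:port registry prefix are left alone;
        # everything else is pointed at the local registry.
        if re.match(r'^.+:.+/', tag):
            return tag
        return "%s/%s" % (DOCKER_REGISTRY_SERVER, tag)

    assert localize_image_tag("docker.io/mariadb:10.2.13") == \
        "registry.local:9001/docker.io/mariadb:10.2.13"
    assert localize_image_tag("registry.local:9001/docker.io/mariadb:10.2.13") == \
        "registry.local:9001/docker.io/mariadb:10.2.13"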
@@ -438,8 +439,8 @@ class AppOperator(object):
b. tag and push them to the docker registry on the controller
c. find image tag IDs in each chart and replace their values with
new tags. Alternatively, document the image tagging convention
${MGMT_FLOATING_IP}:${REGISTRY_PORT}/<image-name>
(e.g. 192.168.204.2:9001/prom/mysqld-exporter)
${LOCAL_REGISTRY_SERVER}:${REGISTRY_PORT}/<image-name>
(e.g. registry.local:9001/prom/mysqld-exporter)
to be referenced in the application Helm charts.
"""
raise exception.KubeAppApplyFailure(
@@ -768,13 +769,12 @@ class AppOperator(object):
continue
try:
local_registry_server = self._docker.get_local_docker_registry_server()
local_registry_auth = get_local_docker_registry_auth()
auth = '{0}:{1}'.format(local_registry_auth['username'],
local_registry_auth['password'])
token = '{{\"auths\": {{\"{0}\": {{\"auth\": \"{1}\"}}}}}}'.format(
local_registry_server, base64.b64encode(auth))
constants.DOCKER_REGISTRY_SERVER, base64.b64encode(auth))
body['data'].update({'.dockerconfigjson': base64.b64encode(token)})
body['metadata'].update({'name': DOCKER_REGISTRY_SECRET,
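The secret assembled here is a standard kubernetes.io/dockerconfigjson payload: base64(user:password) nested inside a base64-encoded auths document keyed by the registry server. A Python 3 sketch of the same construction (the credentials are placeholders):

    import base64
    import json

    def make_dockerconfigjson(server, username, password):
        # Inner value: base64 of 'user:password'
        auth = base64.b64encode(
            '{0}:{1}'.format(username, password).encode()).decode()
        # Outer value: base64 of the auths JSON keyed by the registry server
        token = json.dumps({"auths": {server: {"auth": auth}}})
        return base64.b64encode(token.encode()).decode()

    # make_dockerconfigjson('registry.local:9001', 'admin', 'secret')
    # -> the value stored under .dockerconfigjson in the registry secret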
@ -1507,6 +1507,7 @@ class DockerHelper(object):
detach=True,
volumes=binds,
restart_policy={'Name': 'always'},
network_mode='host',
command=None)
LOG.info("Armada service started!")
return container
@ -1528,6 +1529,10 @@ class DockerHelper(object):
rc = True
# Instruct Armada to reach tiller through a resolvable service hostname,
# since Armada does not properly handle raw IPv6 endpoints
tiller_host = " --tiller-host tiller-deploy.kube-system.svc.cluster.local"
try:
client = docker.from_env(timeout=INSTALLATION_TIMEOUT)
armada_svc = self._start_armada_service(client)
@ -1549,7 +1554,7 @@ class DockerHelper(object):
"%s: %s." % (manifest_file, exec_logs))
elif request == constants.APP_APPLY_OP:
cmd = "/bin/bash -c 'armada apply --debug " + manifest_file +\
overrides_str + " | tee " + logfile + "'"
overrides_str + tiller_host + " | tee " + logfile + "'"
LOG.info("Armada apply command = %s" % cmd)
(exit_code, exec_logs) = armada_svc.exec_run(cmd)
if exit_code == 0:
@ -1572,7 +1577,7 @@ class DockerHelper(object):
"%s." % (manifest_file, exec_logs))
elif request == constants.APP_DELETE_OP:
cmd = "/bin/bash -c 'armada delete --debug --manifest " +\
manifest_file + " | tee " + logfile + "'"
manifest_file + tiller_host + " | tee " + logfile + "'"
(exit_code, exec_logs) = armada_svc.exec_run(cmd)
if exit_code == 0:
LOG.info("Application charts were successfully "
@ -1599,14 +1604,6 @@ class DockerHelper(object):
(request, manifest_file, e))
return rc
def get_local_docker_registry_server(self):
registry_ip = self._dbapi.address_get_by_name(
cutils.format_address_name(constants.CONTROLLER_HOSTNAME,
constants.NETWORK_TYPE_MGMT)
).address
registry_server = '{}:{}'.format(registry_ip, constants.DOCKER_REGISTRY_PORT)
return registry_server
def _get_img_tag_with_registry(self, pub_img_tag):
registry_name = pub_img_tag[0:1 + pub_img_tag.find('/')]
img_name = pub_img_tag[1 + pub_img_tag.find('/'):]
@ -1651,10 +1648,9 @@ class DockerHelper(object):
rc = True
# retrieve user specified registries first
self._retrieve_specified_registries()
local_registry_server = self.get_local_docker_registry_server()
start = time.time()
if img_tag.startswith(local_registry_server):
if img_tag.startswith(constants.DOCKER_REGISTRY_HOST):
try:
LOG.info("Image %s download started from local registry" % img_tag)
local_registry_auth = get_local_docker_registry_auth()
@ -1666,7 +1662,8 @@ class DockerHelper(object):
LOG.info("Image %s is not available in local registry, "
"download started from public/private registry"
% img_tag)
pub_img_tag = img_tag.replace(local_registry_server + "/", "")
pub_img_tag = img_tag.replace(
constants.DOCKER_REGISTRY_SERVER + "/", "")
target_img_tag = self._get_img_tag_with_registry(pub_img_tag)
client.pull(target_img_tag)
except Exception as e:
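The download path is local-first: anything tagged for registry.local is pulled with the local registry credentials, and only on failure is the local prefix stripped so the image can be fetched from its public or private upstream (the user-specified registry substitution done by _get_img_tag_with_registry is elided here). A condensed, hypothetical sketch assuming a docker-py client like the one used above:

    def pull_image(client, img_tag):
        # Condensed version of DockerHelper's pull logic
        if img_tag.startswith("registry.local"):
            try:
                auth = get_local_docker_registry_auth()
                return client.pull(img_tag, auth_config=auth)
            except Exception:
                # Not mirrored locally yet: strip the local prefix, go upstream
                pub_img_tag = img_tag.replace("registry.local:9001/", "")
                return client.pull(pub_img_tag)
        return client.pull(img_tag)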

View File

@ -1431,19 +1431,8 @@ class ConductorManager(service.PeriodicService):
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def _get_docker_registry_addr(self):
registry_ip = self.dbapi.address_get_by_name(
cutils.format_address_name(constants.CONTROLLER_HOSTNAME,
constants.NETWORK_TYPE_MGMT)
).address
registry_server = 'https://{}:{}/v2/'.format(
registry_ip, constants.DOCKER_REGISTRY_PORT)
return registry_server
def docker_registry_image_list(self, context):
image_list_response = docker_registry.docker_registry_get(
"_catalog", self._get_docker_registry_addr())
image_list_response = docker_registry.docker_registry_get("_catalog")
if image_list_response.status_code != 200:
LOG.error("Bad response from docker registry: %s"
% image_list_response.status_code)
@ -1466,7 +1455,7 @@ class ConductorManager(service.PeriodicService):
def docker_registry_image_tags(self, context, image_name):
image_tags_response = docker_registry.docker_registry_get(
"%s/tags/list" % image_name, self._get_docker_registry_addr())
"%s/tags/list" % image_name)
if image_tags_response.status_code != 200:
LOG.error("Bad response from docker registry: %s"
@ -1495,8 +1484,7 @@ class ConductorManager(service.PeriodicService):
# first get the image digest for the image name and tag provided
digest_resp = docker_registry.docker_registry_get("%s/manifests/%s"
% (image_name_and_tag[0], image_name_and_tag[1]),
self._get_docker_registry_addr())
% (image_name_and_tag[0], image_name_and_tag[1]))
if digest_resp.status_code != 200:
LOG.error("Bad response from docker registry: %s"
@ -1507,8 +1495,7 @@ class ConductorManager(service.PeriodicService):
# now delete the image
image_delete_response = docker_registry.docker_registry_delete(
"%s/manifests/%s" % (image_name_and_tag[0], image_digest),
self._get_docker_registry_addr())
"%s/manifests/%s" % (image_name_and_tag[0], image_digest))
if image_delete_response.status_code != 202:
LOG.error("Bad response from docker registry: %s"

View File

@ -174,7 +174,7 @@ class BaseHelm(object):
port = self.CEPH_MON_SERVICE_PORT
monitor_ips = self._get_ceph_monitor_ips()
formatted_monitor_ips = [
utils._format_ceph_mon_address(mon, port) for mon in monitor_ips
utils.format_ceph_mon_address(mon, port) for mon in monitor_ips
]
return formatted_monitor_ips
@ -188,6 +188,10 @@ class BaseHelm(object):
constants.CONTROLLER_0_HOSTNAME, constants.NETWORK_TYPE_MGMT)
return address.address
@staticmethod
def _format_url_address(address):
return utils.format_url_address(address)
def _get_host_cpu_list(self, host, function=None, threads=False):
"""
Retrieve a list of CPUs for the host, filtered by function and thread

View File

@ -117,13 +117,13 @@ class KeystoneApiProxyHelm(openstack.OpenstackBaseHelm):
}
def _get_transport_url(self):
host_url = self._get_management_address()
host_url = self._format_url_address(self._get_management_address())
auth_password = self._get_keyring_password('amqp', 'rabbit')
transport_url = "rabbit://guest:%s@%s:5672" % (auth_password, host_url)
return transport_url
def _get_database_connection(self):
host_url = self._get_management_address()
host_url = self._format_url_address(self._get_management_address())
auth_password = self._get_keyring_password(
self.DCORCH_SERVICE_NAME, 'database')
connection = "postgresql+psycopg2://admin-dcorch:%s@%s/dcorch" %\

View File

@ -5,7 +5,6 @@
#
import abc
import netaddr
import os
import six
@ -256,15 +255,7 @@ class BasePuppet(object):
@staticmethod
def _format_url_address(address):
"""Format the URL address according to RFC 2732"""
try:
addr = netaddr.IPAddress(address)
if addr.version == constants.IPV6_FAMILY:
return "[%s]" % address
else:
return str(address)
except netaddr.AddrFormatError:
return address
return utils.format_url_address(address)
# TODO (jgauld): Refactor to use utility has_openstack_compute(labels)
def is_openstack_compute(self, host):
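The deleted inline body is a faithful picture of what the shared utils.format_url_address helper does: per RFC 2732, IPv6 literals are bracketed and everything else passes through. A standalone sketch along the lines of the removed code:

    import netaddr

    IPV6_FAMILY = 6  # mirrors constants.IPV6_FAMILY

    def format_url_address(address):
        """Format the URL address according to RFC 2732."""
        try:
            addr = netaddr.IPAddress(address)
            if addr.version == IPV6_FAMILY:
                return "[%s]" % address
            return str(address)
        except netaddr.AddrFormatError:
            # Hostnames are not IP literals; return them untouched
            return address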

View File

@ -9,6 +9,7 @@ import uuid
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.common.storage_backend_conf import StorageBackendConfig
from sysinv.puppet import openstack
@ -262,10 +263,8 @@ class CephPuppet(openstack.OpenstackBasePuppet):
}
def _format_ceph_mon_address(self, ip_address):
if netaddr.IPAddress(ip_address).version == constants.IPV4_FAMILY:
return '%s:%d' % (ip_address, self.SERVICE_PORT_MON)
else:
return '[%s]:%d' % (ip_address, self.SERVICE_PORT_MON)
return utils.format_ceph_mon_address(
ip_address, self.SERVICE_PORT_MON)
def _get_host_ceph_mon(self, host):
ceph_mons = self.dbapi.ceph_mon_get_by_ihost(host.uuid)
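utils.format_ceph_mon_address applies the same IPv6 bracketing to monitor endpoints. A sketch matching the behaviour of the removed method (6789 stands in for SERVICE_PORT_MON):

    import netaddr

    def format_ceph_mon_address(ip_address, service_port_mon=6789):
        # IPv4 monitors keep the plain ip:port form; IPv6 monitors are bracketed
        if netaddr.IPAddress(ip_address).version == 4:
            return '%s:%d' % (ip_address, service_port_mon)
        return '[%s]:%d' % (ip_address, service_port_mon)

    # '192.168.204.3' -> '192.168.204.3:6789'
    # 'fd00::3'       -> '[fd00::3]:6789'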

View File

@ -5,6 +5,7 @@
#
from __future__ import absolute_import
import netaddr
import os
import json
import subprocess
@ -19,6 +20,10 @@ from sysinv.puppet import interface
LOG = logging.getLogger(__name__)
# Offset aligns with kubeadm DNS IP allocation scheme:
# kubernetes/cmd/kubeadm/app/constants/constants.go:GetDNSIP
CLUSTER_SERVICE_DNS_IP_OFFSET = 10
class KubernetesPuppet(base.BasePuppet):
"""Class to encapsulate puppet operations for kubernetes configuration"""
@ -31,6 +36,8 @@ class KubernetesPuppet(base.BasePuppet):
{'platform::kubernetes::params::enabled': True,
'platform::kubernetes::params::pod_network_cidr':
self._get_pod_network_cidr(),
'platform::kubernetes::params::pod_network_ipversion':
self._get_pod_network_ipversion(),
'platform::kubernetes::params::service_network_cidr':
self._get_cluster_service_subnet(),
'platform::kubernetes::params::apiserver_advertise_address':
@ -73,6 +80,9 @@ class KubernetesPuppet(base.BasePuppet):
def get_host_config(self, host):
config = {}
# Update node configuration for host
config.update(self._get_host_node_config(host))
# Retrieve labels for this host
config.update(self._get_host_label_config(host))
@ -109,6 +119,10 @@ class KubernetesPuppet(base.BasePuppet):
def _get_pod_network_cidr(self):
return self._get_network_config(constants.NETWORK_TYPE_CLUSTER_POD)
def _get_pod_network_ipversion(self):
subnet = netaddr.IPNetwork(self._get_pod_network_cidr())
return subnet.version
def _get_cluster_service_subnet(self):
return self._get_network_config(constants.NETWORK_TYPE_CLUSTER_SERVICE)
@ -127,8 +141,15 @@ class KubernetesPuppet(base.BasePuppet):
return constants.DEFAULT_DNS_SERVICE_DOMAIN
def _get_dns_service_ip(self):
# Setting this to a constant for now. Will be configurable later
return constants.DEFAULT_DNS_SERVICE_IP
subnet = netaddr.IPNetwork(self._get_cluster_service_subnet())
return str(subnet[CLUSTER_SERVICE_DNS_IP_OFFSET])
def _get_host_node_config(self, host):
node_ip = self._get_address_by_name(
host.hostname, constants.NETWORK_TYPE_MGMT).address
return {
'platform::kubernetes::params::node_ip': node_ip
}
def _get_host_label_config(self, host):
config = {}
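Two of the derivations above are easy to see with netaddr directly: the pod network IP version comes from the CIDR, and the DNS service IP is the address at offset 10 into the service subnet, matching kubeadm's GetDNSIP. An illustration with example subnets standing in for the configured networks:

    import netaddr

    CLUSTER_SERVICE_DNS_IP_OFFSET = 10

    netaddr.IPNetwork('fd02::/64').version                # -> 6 (pod_network_ipversion)
    netaddr.IPNetwork('10.96.0.0/12')[CLUSTER_SERVICE_DNS_IP_OFFSET]  # -> 10.96.0.10
    netaddr.IPNetwork('fd01::/112')[CLUSTER_SERVICE_DNS_IP_OFFSET]    # -> fd01::a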

View File

@ -435,24 +435,6 @@ class PlatformPuppet(base.BasePuppet):
def _get_host_sysctl_config(self, host):
config = {}
if host.personality == constants.CONTROLLER:
remotelogging = self.dbapi.remotelogging_get_one()
ip_forwarding = (self._region_config() or
self._sdn_enabled() or
remotelogging.enabled)
# The forwarding IP version is based on the OAM network version
address = self._get_address_by_name(
constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_OAM)
ip_version = address.family
config.update({
'platform::sysctl::params::ip_forwarding': ip_forwarding,
'platform::sysctl::params::ip_version': ip_version,
})
if constants.LOWLATENCY in host.subfunctions:
config.update({
'platform::sysctl::params::low_latency': True

View File

@ -250,12 +250,9 @@ start()
fatal_error "This node is running a different load than the active controller and must be reinstalled"
fi
# Install docker certificate if required
# this is management network for now
REGISTRY_IP=$(get_ip controller)
mkdir -p /etc/docker/certs.d/$REGISTRY_IP:9001/
chmod 700 /etc/docker/certs.d/$REGISTRY_IP:9001/
cp $CONFIG_DIR/registry-cert.crt /etc/docker/certs.d/$REGISTRY_IP:9001/registry-cert.crt
mkdir -p /etc/docker/certs.d/registry.local:9001/
chmod 700 /etc/docker/certs.d/registry.local:9001/
cp $CONFIG_DIR/registry-cert.crt /etc/docker/certs.d/registry.local:9001/registry-cert.crt
if [ $? -ne 0 ]
then
fatal_error "Unable to copy $CONFIG_DIR/registry-cert.crt to docker dir"