Merge "Uplift Ceph charts to the Mimic release"
This commit is contained in:
commit
0770465962
@ -25,13 +25,13 @@ release_group: null
|
|||||||
images:
|
images:
|
||||||
pull_policy: IfNotPresent
|
pull_policy: IfNotPresent
|
||||||
tags:
|
tags:
|
||||||
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
ceph_mds: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_mds: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_mgr: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_mgr: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_rbd_pool: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_rbd_pool: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
||||||
image_repo_sync: docker.io/docker:17.07.0
|
image_repo_sync: 'docker.io/docker:17.07.0'
|
||||||
local_registry:
|
local_registry:
|
||||||
active: false
|
active: false
|
||||||
exclude:
|
exclude:
|
||||||
@ -357,8 +357,8 @@ bootstrap:
|
|||||||
ceph -s
|
ceph -s
|
||||||
function ensure_pool () {
|
function ensure_pool () {
|
||||||
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
||||||
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
|
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
|
||||||
if [[ ${test_luminous} -gt 0 ]]; then
|
if [[ ${test_version} -gt 0 ]]; then
|
||||||
ceph osd pool application enable $1 $3
|
ceph osd pool application enable $1 $3
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -24,12 +24,12 @@ deployment:
|
|||||||
images:
|
images:
|
||||||
pull_policy: IfNotPresent
|
pull_policy: IfNotPresent
|
||||||
tags:
|
tags:
|
||||||
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
ceph_mon: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_mon: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_mon_check: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_mon_check: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
||||||
image_repo_sync: docker.io/docker:17.07.0
|
image_repo_sync: 'docker.io/docker:17.07.0'
|
||||||
local_registry:
|
local_registry:
|
||||||
active: false
|
active: false
|
||||||
exclude:
|
exclude:
|
||||||
@ -228,8 +228,8 @@ bootstrap:
|
|||||||
ceph -s
|
ceph -s
|
||||||
function ensure_pool () {
|
function ensure_pool () {
|
||||||
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
||||||
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
|
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
|
||||||
if [[ ${test_luminous} -gt 0 ]]; then
|
if [[ ${test_version} -gt 0 ]]; then
|
||||||
ceph osd pool application enable $1 $3
|
ceph osd pool application enable $1 $3
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -29,8 +29,8 @@ eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import
|
|||||||
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
|
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
|
||||||
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
|
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
|
||||||
|
|
||||||
if [[ $(ceph -v | egrep -q "12.2|luminous"; echo $?) -ne 0 ]]; then
|
if [[ $(ceph -v | egrep -q "mimic|luminous"; echo $?) -ne 0 ]]; then
|
||||||
echo "ERROR- need Luminous release"
|
echo "ERROR- need Luminous/Mimic release"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -20,11 +20,11 @@
|
|||||||
images:
|
images:
|
||||||
pull_policy: IfNotPresent
|
pull_policy: IfNotPresent
|
||||||
tags:
|
tags:
|
||||||
ceph_osd: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_osd: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
||||||
image_repo_sync: docker.io/docker:17.07.0
|
image_repo_sync: 'docker.io/docker:17.07.0'
|
||||||
local_registry:
|
local_registry:
|
||||||
active: false
|
active: false
|
||||||
exclude:
|
exclude:
|
||||||
@ -124,7 +124,7 @@ conf:
|
|||||||
# match the failure domain used on your CRUSH rules for pools. For example with a crush rule of
|
# match the failure domain used on your CRUSH rules for pools. For example with a crush rule of
|
||||||
# rack_replicated_rule you would specify "rack" as the `failure_domain` to use.
|
# rack_replicated_rule you would specify "rack" as the `failure_domain` to use.
|
||||||
# `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. See the supported CRUSH configuration
|
# `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. See the supported CRUSH configuration
|
||||||
# as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/luminous/rados/operations/crush-map/
|
# as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/mimic/rados/operations/crush-map/
|
||||||
# `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
|
# `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
|
||||||
# `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used
|
# `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used
|
||||||
# when using host based overrides.
|
# when using host based overrides.
|
||||||
@ -134,6 +134,7 @@ conf:
|
|||||||
|
|
||||||
# NOTE(supamatt): Add a configurable option to reset the past interval time of a PG.
|
# NOTE(supamatt): Add a configurable option to reset the past interval time of a PG.
|
||||||
# This solves an open bug within Ceph Luminous releases. https://tracker.ceph.com/issues/21142
|
# This solves an open bug within Ceph Luminous releases. https://tracker.ceph.com/issues/21142
|
||||||
|
# Not required for Mimic releases.
|
||||||
osd_pg_interval_fix: "false"
|
osd_pg_interval_fix: "false"
|
||||||
|
|
||||||
# NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
|
# NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
|
||||||
@ -210,8 +211,8 @@ bootstrap:
|
|||||||
ceph -s
|
ceph -s
|
||||||
function ensure_pool () {
|
function ensure_pool () {
|
||||||
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
||||||
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
|
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
|
||||||
if [[ ${test_luminous} -gt 0 ]]; then
|
if [[ ${test_version} -gt 0 ]]; then
|
||||||
ceph osd pool application enable $1 $3
|
ceph osd pool application enable $1 $3
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -28,12 +28,12 @@ release_group: null
|
|||||||
images:
|
images:
|
||||||
pull_policy: IfNotPresent
|
pull_policy: IfNotPresent
|
||||||
tags:
|
tags:
|
||||||
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v1.1.0-k8s1.10'
|
ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v1.1.0-k8s1.10'
|
||||||
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v1.1.0-k8s1.10'
|
ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v1.1.0-k8s1.10'
|
||||||
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
||||||
image_repo_sync: docker.io/docker:17.07.0
|
image_repo_sync: 'docker.io/docker:17.07.0'
|
||||||
local_registry:
|
local_registry:
|
||||||
active: false
|
active: false
|
||||||
exclude:
|
exclude:
|
||||||
@ -161,8 +161,8 @@ bootstrap:
|
|||||||
ceph -s
|
ceph -s
|
||||||
function ensure_pool () {
|
function ensure_pool () {
|
||||||
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
||||||
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
|
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
|
||||||
if [[ ${test_luminous} -gt 0 ]]; then
|
if [[ ${test_version} -gt 0 ]]; then
|
||||||
ceph osd pool application enable $1 $3
|
ceph osd pool application enable $1 $3
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -23,7 +23,7 @@ cp -va /tmp/ceph.conf /etc/ceph/ceph.conf
|
|||||||
cat >> /etc/ceph/ceph.conf <<EOF
|
cat >> /etc/ceph/ceph.conf <<EOF
|
||||||
|
|
||||||
[client.rgw.$(hostname -s)]
|
[client.rgw.$(hostname -s)]
|
||||||
rgw_frontends = "civetweb port=${RGW_CIVETWEB_PORT}"
|
rgw_frontends = "beast port=${RGW_FRONTEND_PORT}"
|
||||||
rgw_keystone_url = "${KEYSTONE_URL}"
|
rgw_keystone_url = "${KEYSTONE_URL}"
|
||||||
rgw_keystone_admin_user = "${OS_USERNAME}"
|
rgw_keystone_admin_user = "${OS_USERNAME}"
|
||||||
rgw_keystone_admin_password = "${OS_PASSWORD}"
|
rgw_keystone_admin_password = "${OS_PASSWORD}"
|
||||||
|
@ -54,7 +54,7 @@ if [ ! -e "${RGW_KEYRING}" ]; then
|
|||||||
chmod 0600 "${RGW_KEYRING}"
|
chmod 0600 "${RGW_KEYRING}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
RGW_FRONTENDS="civetweb port=${RGW_CIVETWEB_PORT}"
|
RGW_FRONTENDS="beast port=${RGW_FRONTEND_PORT}"
|
||||||
|
|
||||||
/usr/bin/radosgw \
|
/usr/bin/radosgw \
|
||||||
--cluster "${CLUSTER}" \
|
--cluster "${CLUSTER}" \
|
||||||
|
@ -83,7 +83,7 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
- name: KEYSTONE_URL
|
- name: KEYSTONE_URL
|
||||||
value: {{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | trimSuffix .Values.endpoints.identity.path.default | quote }}
|
value: {{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | trimSuffix .Values.endpoints.identity.path.default | quote }}
|
||||||
- name: RGW_CIVETWEB_PORT
|
- name: RGW_FRONTEND_PORT
|
||||||
value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
|
value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
|
||||||
command:
|
command:
|
||||||
- /tmp/rgw-init-keystone.sh
|
- /tmp/rgw-init-keystone.sh
|
||||||
@ -106,7 +106,7 @@ spec:
|
|||||||
env:
|
env:
|
||||||
- name: CLUSTER
|
- name: CLUSTER
|
||||||
value: "ceph"
|
value: "ceph"
|
||||||
- name: RGW_CIVETWEB_PORT
|
- name: RGW_FRONTEND_PORT
|
||||||
value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
|
value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
|
||||||
command:
|
command:
|
||||||
- /tmp/rgw-start.sh
|
- /tmp/rgw-start.sh
|
||||||
|
@ -27,9 +27,9 @@ images:
|
|||||||
pull_policy: IfNotPresent
|
pull_policy: IfNotPresent
|
||||||
tags:
|
tags:
|
||||||
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
ceph_rgw: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
|
ceph_rgw: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
|
||||||
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
|
||||||
image_repo_sync: docker.io/docker:17.07.0
|
image_repo_sync: 'docker.io/docker:17.07.0'
|
||||||
rgw_s3_admin: 'docker.io/port/ceph-config-helper:v1.10.3'
|
rgw_s3_admin: 'docker.io/port/ceph-config-helper:v1.10.3'
|
||||||
ks_endpoints: 'docker.io/openstackhelm/heat:newton'
|
ks_endpoints: 'docker.io/openstackhelm/heat:newton'
|
||||||
ks_service: 'docker.io/openstackhelm/heat:newton'
|
ks_service: 'docker.io/openstackhelm/heat:newton'
|
||||||
@ -62,11 +62,11 @@ pod:
|
|||||||
enabled: false
|
enabled: false
|
||||||
rgw:
|
rgw:
|
||||||
requests:
|
requests:
|
||||||
memory: "5Mi"
|
memory: "128Mi"
|
||||||
cpu: "250m"
|
cpu: "250m"
|
||||||
limits:
|
limits:
|
||||||
memory: "50Mi"
|
memory: "512Mi"
|
||||||
cpu: "500m"
|
cpu: "1000m"
|
||||||
jobs:
|
jobs:
|
||||||
ceph-rgw-storage-init:
|
ceph-rgw-storage-init:
|
||||||
requests:
|
requests:
|
||||||
@ -294,6 +294,8 @@ conf:
|
|||||||
rgw_thread_pool_size: 512
|
rgw_thread_pool_size: 512
|
||||||
rgw_num_rados_handles: 4
|
rgw_num_rados_handles: 4
|
||||||
rgw_override_bucket_index_max_shards: 8
|
rgw_override_bucket_index_max_shards: 8
|
||||||
|
#NOTE (supamatt): Unfortunately we do not conform to S3 compliant names with some of our charts
|
||||||
|
rgw_relaxed_s3_bucket_names: true
|
||||||
rgw_s3:
|
rgw_s3:
|
||||||
enabled: false
|
enabled: false
|
||||||
admin_caps: "users=*;buckets=*;zone=*"
|
admin_caps: "users=*;buckets=*;zone=*"
|
||||||
@ -364,8 +366,8 @@ bootstrap:
|
|||||||
ceph -s
|
ceph -s
|
||||||
function ensure_pool () {
|
function ensure_pool () {
|
||||||
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
||||||
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
|
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous")
|
||||||
if [[ ${test_luminous} -gt 0 ]]; then
|
if [[ ${test_version} -gt 0 ]]; then
|
||||||
ceph osd pool application enable $1 $3
|
ceph osd pool application enable $1 $3
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -28,8 +28,8 @@ set -ex
|
|||||||
ceph -s
|
ceph -s
|
||||||
function ensure_pool () {
|
function ensure_pool () {
|
||||||
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
ceph osd pool stats $1 || ceph osd pool create $1 $2
|
||||||
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
|
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
|
||||||
if [[ ${test_luminous} -gt 0 ]]; then
|
if [[ ${test_version} -gt 0 ]]; then
|
||||||
ceph osd pool application enable $1 $3
|
ceph osd pool application enable $1 $3
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -28,7 +28,7 @@ sudo sed -i '/^::1/c\::1 localhost6 localhost6.localdomain6' /etc/hosts
|
|||||||
# NOTE: Install required packages on host
|
# NOTE: Install required packages on host
|
||||||
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv 460F3994
|
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv 460F3994
|
||||||
RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
|
RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
|
||||||
sudo add-apt-repository "deb https://download.ceph.com/debian-luminous/ ${RELEASE_NAME} main"
|
sudo add-apt-repository "deb https://download.ceph.com/debian-mimic/ ${RELEASE_NAME} main"
|
||||||
sudo -E apt-get update
|
sudo -E apt-get update
|
||||||
sudo -E apt-get install -y \
|
sudo -E apt-get install -y \
|
||||||
docker.io \
|
docker.io \
|
||||||
|
@ -23,7 +23,7 @@
|
|||||||
- name: ubuntu | ensure community ceph repository exists
|
- name: ubuntu | ensure community ceph repository exists
|
||||||
when: ansible_distribution == 'Ubuntu'
|
when: ansible_distribution == 'Ubuntu'
|
||||||
apt_repository:
|
apt_repository:
|
||||||
repo: "deb https://download.ceph.com/debian-luminous/ {{ ansible_lsb.codename }} main"
|
repo: "deb https://download.ceph.com/debian-mimic/ {{ ansible_lsb.codename }} main"
|
||||||
state: present
|
state: present
|
||||||
update_cache: yes
|
update_cache: yes
|
||||||
|
|
||||||
@ -33,7 +33,7 @@
|
|||||||
name: ceph
|
name: ceph
|
||||||
description: "Ceph community packages for Redhat/Centos"
|
description: "Ceph community packages for Redhat/Centos"
|
||||||
gpgkey: "https://download.ceph.com/keys/release.asc"
|
gpgkey: "https://download.ceph.com/keys/release.asc"
|
||||||
baseurl: "https://download.ceph.com/rpm-luminous/el7/$basearch"
|
baseurl: "https://download.ceph.com/rpm-mimic/el7/$basearch"
|
||||||
gpgcheck: yes
|
gpgcheck: yes
|
||||||
state: present
|
state: present
|
||||||
|
|
||||||
@ -73,6 +73,14 @@
|
|||||||
- ceph-common
|
- ceph-common
|
||||||
- rbd-nbd
|
- rbd-nbd
|
||||||
|
|
||||||
|
- name: ubuntu | uninstall packages
|
||||||
|
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||||
|
apt:
|
||||||
|
name: "{{item}}"
|
||||||
|
state: absent
|
||||||
|
with_items:
|
||||||
|
- ceph
|
||||||
|
|
||||||
- name: centos | installing packages
|
- name: centos | installing packages
|
||||||
when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
|
when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
|
||||||
yum:
|
yum:
|
||||||
@ -82,6 +90,14 @@
|
|||||||
- ceph-common
|
- ceph-common
|
||||||
- rbd-nbd
|
- rbd-nbd
|
||||||
|
|
||||||
|
- name: centos | uninstall packages
|
||||||
|
when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
|
||||||
|
yum:
|
||||||
|
name: "{{item}}"
|
||||||
|
state: absent
|
||||||
|
with_items:
|
||||||
|
- ceph
|
||||||
|
|
||||||
- name: blacklist kernel RBD driver module
|
- name: blacklist kernel RBD driver module
|
||||||
when: kubelet.pv_support_ceph
|
when: kubelet.pv_support_ceph
|
||||||
copy:
|
copy:
|
||||||
|
@ -2,7 +2,7 @@ FROM docker.io/ubuntu:xenial
|
|||||||
MAINTAINER pete.birley@att.com
|
MAINTAINER pete.birley@att.com
|
||||||
|
|
||||||
ARG LIBVIRT_VERSION=ocata
|
ARG LIBVIRT_VERSION=ocata
|
||||||
ARG CEPH_RELEASE=luminous
|
ARG CEPH_RELEASE=mimic
|
||||||
ARG PROJECT=nova
|
ARG PROJECT=nova
|
||||||
ARG UID=42424
|
ARG UID=42424
|
||||||
ARG GID=42424
|
ARG GID=42424
|
||||||
|
@ -18,7 +18,7 @@ SHELL := /bin/bash
|
|||||||
LIBVIRT_VERSION ?= 1.3.1-1ubuntu10.24
|
LIBVIRT_VERSION ?= 1.3.1-1ubuntu10.24
|
||||||
DISTRO ?= ubuntu
|
DISTRO ?= ubuntu
|
||||||
DISTRO_RELEASE ?= xenial
|
DISTRO_RELEASE ?= xenial
|
||||||
CEPH_RELEASE ?= luminous
|
CEPH_RELEASE ?= mimic
|
||||||
|
|
||||||
DOCKER_REGISTRY ?= docker.io
|
DOCKER_REGISTRY ?= docker.io
|
||||||
IMAGE_NAME ?= libvirt
|
IMAGE_NAME ?= libvirt
|
||||||
|
@ -33,7 +33,7 @@ repo run:
|
|||||||
LIBVIRT_VERSION=1.3.1-1ubuntu10.24
|
LIBVIRT_VERSION=1.3.1-1ubuntu10.24
|
||||||
DISTRO=ubuntu
|
DISTRO=ubuntu
|
||||||
DISTRO_RELEASE=xenial
|
DISTRO_RELEASE=xenial
|
||||||
CEPH_RELEASE=luminous
|
CEPH_RELEASE=mimic
|
||||||
|
|
||||||
sudo docker build \
|
sudo docker build \
|
||||||
--network=host \
|
--network=host \
|
||||||
|
Loading…
Reference in New Issue
Block a user