openstack-helm-infra/ceph-rgw/values.yaml
Stephen Taylor 016b56e586 Ceph Nautilus compatibility
This change updates the Ceph charts to use Ceph Nautilus images
built on Ubuntu Bionic instead of Xenial. The mirror that hosts
Ceph packages only provides Nautilus packages for Bionic at
present, so this is necessary for Nautilus deployment.

There are also several configuration and scripting changes
included to provide compatibility with Ceph Nautilus. Most of
these simply allow existing logic to execute for Nautilus
deployments, but some logical changes are required to support
Nautilus as well.

NOTE: The cephfs test has been disabled because it was failing
the gate. This test has passed in multiple dev environments, and
since cephfs isn't used by any openstack-helm-infra components, we
don't want this to block getting this change merged. The gate
issue will be investigated and addressed in a subsequent patch
set.

Change-Id: Id2d9d7b35d4dc66e93a0aacc9ea514e85ae13467
2019-12-17 18:47:24 +00:00

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for ceph-rgw.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
deployment:
  ceph: false
release_group: null
images:
  pull_policy: IfNotPresent
  tags:
    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
    image_repo_sync: 'docker.io/docker:17.07.0'
    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
    ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
    ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
    ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  rgw:
    node_selector_key: ceph-rgw
    node_selector_value: enabled
pod:
  security_context:
    rgw:
      pod:
        runAsUser: 64045
      container:
        init_dirs:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        rgw_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
        rgw:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    rgw_storage_init:
      pod:
        runAsUser: 64045
      container:
        keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        rgw_storage_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    rgw_s3_admin:
      pod:
        runAsUser: 64045
      container:
        keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        create_s3_admin:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    rgw: 2
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  resources:
    enabled: false
    rgw:
      requests:
        memory: "128Mi"
        cpu: "250m"
      limits:
        memory: "512Mi"
        cpu: "1000m"
    jobs:
      ceph-rgw-storage-init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks-endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rgw_s3_admin:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
network_policy:
  rgw:
    ingress:
      - {}
    egress:
      - {}
ceph_client:
  configmap: ceph-etc
secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: os-ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: pvc-ceph-client-key
  identity:
    admin: ceph-keystone-admin
    swift: ceph-keystone-user
    user_rgw: ceph-keystone-user-rgw
  rgw_s3:
    admin: radosgw-s3-admin-creds
  tls:
    object_store:
      api:
        public: ceph-tls-public
network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
        nginx.ingress.kubernetes.io/proxy-body-size: "0"
        nginx.org/proxy-max-temp-file-size: "0"
    external_policy_local: false
    node_port:
      enabled: false
      port: 30004
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
conf:
  templates:
    keyring:
      admin: |
        [client.admin]
        key = {{ key }}
        auid = 0
        caps mds = "allow"
        caps mon = "allow *"
        caps osd = "allow *"
        caps mgr = "allow *"
      bootstrap:
        rgw: |
          [client.bootstrap-rgw]
          key = {{ key }}
          caps mgr = "allow profile bootstrap-rgw"
  features:
    rgw: true
  pool:
    #NOTE(portdirect): this drives a simple approximation of
    # https://ceph.com/pgcalc/; the `target.osd` key should be set to match the
    # expected number of OSDs in a cluster, and `target.pg_per_osd` should be
    # set to the desired number of placement groups on each OSD (see the
    # worked example after the pool spec below).
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      #NOTE(portdirect): arbitrarily we set the default number of expected OSDs
      # to 5 to match the number of nodes in the OSH gate.
      osd: 5
      pg_per_osd: 100
    default:
      #NOTE(portdirect): this should be 'same_host' for a single-node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    #NOTE(portdirect): this section describes the pools that will be managed by
    # the ceph pool management job, which tunes the pgs and crush rule based on
    # the targets above.
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
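    # Worked example of the pgcalc approximation above (illustrative only; the
    # pool management job performs the actual calculation and rounding):
    #   PG budget      ~ target.osd * target.pg_per_osd = 5 * 100 = 500
    #   PGs per pool   ~ budget * (percent_total_data / 100) / replication,
    #                    rounded to a power of two
    #   e.g. rbd:                      500 * 0.40  / 3 ~ 67 -> 64 PGs
    #   e.g. default.rgw.buckets.data: 500 * 0.348 / 3 ~ 58 -> 64 PGs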
  rgw:
    config:
      #NOTE (portdirect): See http://tracker.ceph.com/issues/21226
      rgw_keystone_token_cache_size: 0
      #NOTE (JCL): See http://tracker.ceph.com/issues/7073
      rgw_gc_max_objs: 997
      #NOTE (JCL): See http://tracker.ceph.com/issues/24937
      #NOTE (JCL): See https://tracker.ceph.com/issues/24551
      rgw_dynamic_resharding: false
      rgw_num_rados_handles: 4
      rgw_override_bucket_index_max_shards: 8
  rgw_ks:
    enabled: false
    config:
      rgw_keystone_api_version: 3
      rgw_keystone_accepted_roles: "admin, member"
      rgw_keystone_implicit_tenants: true
      rgw_keystone_make_new_tenants: true
      rgw_s3_auth_use_keystone: true
      rgw_swift_account_in_url: true
      rgw_swift_url: null
  rgw_s3:
    enabled: false
    admin_caps: "users=*;buckets=*;zone=*"
    config:
      #NOTE (supamatt): Unfortunately we do not conform to S3 compliant names with some of our charts
      rgw_relaxed_s3_bucket_names: true
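  # A minimal sketch of enabling the Keystone and S3 integrations at deploy
  # time through value overrides (the release name, chart path and namespace
  # below are illustrative assumptions, not defaults of this chart):
  #   helm upgrade --install ceph-rgw ./ceph-rgw \
  #     --namespace=openstack \
  #     --set conf.rgw_ks.enabled=true \
  #     --set conf.rgw_s3.enabled=true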
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
      objecter_inflight_op_bytes: "1073741824"
      debug_ms: "0/0"
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ceph-rgw-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      keystone:
        rgw:
          services:
            - endpoint: internal
              service: identity
      s3:
        rgw: {}
  static:
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    rgw:
      jobs:
        - ceph-rgw-storage-init
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    ks_endpoints:
      jobs:
        - ceph-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rgw_s3_admin:
      services:
        - endpoint: internal
          service: ceph_object_store
    tests:
      services:
        - endpoint: internal
          service: ceph_object_store
bootstrap:
  enabled: false
  script: |
    ceph -s
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous")
      if [[ ${test_version} -gt 0 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
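    # Usage: ensure_pool <pool name> <pg count> <application tag>
    # Creates the pool if it does not already exist and, on Luminous or newer
    # clusters, tags it with the given application, for example: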
    #ensure_pool volumes 8 cinder
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    namespace: null
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
        os_auth_type: password
        os_tenant_name: admin
      swift:
        role: admin
        region_name: RegionOne
        username: swift
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
        os_auth_type: password
        os_tenant_name: admin
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  object_store:
    name: swift
    namespace: null
    hosts:
      default: ceph-rgw
      public: radosgw
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn-overridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: /swift/v1/KEY_$(tenant_id)s
    scheme:
      default: http
    port:
      api:
        default: 8088
        public: 80
  ceph_object_store:
    name: radosgw
    namespace: null
    auth:
      admin:
        # NOTE(srwilkers): These defaults should be used for testing only, and
        # should be changed before deploying to production
        username: s3_admin
        access_key: "admin_access_key"
        secret_key: "admin_secret_key"
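        # One way to replace these test-only credentials is a site-specific
        # overrides file passed to helm via --values; the keys below are
        # placeholders, not generated credentials:
        #   endpoints:
        #     ceph_object_store:
        #       auth:
        #         admin:
        #           access_key: "REPLACE_ME_ACCESS_KEY"
        #           secret_key: "REPLACE_ME_SECRET_KEY"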
    hosts:
      default: ceph-rgw
      public: radosgw
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 8088
        public: 80
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns_tcp:
        default: 53
      dns:
        default: 53
        protocol: UDP
manifests:
  configmap_ceph_templates: true
  configmap_bin: true
  configmap_bin_ks: true
  configmap_test_bin: true
  configmap_etc: true
  deployment_rgw: true
  ingress_rgw: true
  job_ceph_rgw_storage_init: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_s3_admin: true
  secret_s3_rgw: true
  secret_keystone_rgw: true
  secret_ingress_tls: true
  secret_keystone: true
  service_ingress_rgw: true
  service_rgw: true
  helm_tests: true
  network_policy: false