# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for ceph-client.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

deployment:
  ceph: true

release_group: null

images:
  pull_policy: IfNotPresent
  tags:
    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
    ceph_mds: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_mgr: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_rbd_pool: 'docker.io/port/ceph-config-helper:v1.10.3'
    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
    image_repo_sync: 'docker.io/docker:17.07.0'
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  provisioner:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  mds:
    node_selector_key: ceph-mds
    node_selector_value: enabled
  mgr:
    node_selector_key: ceph-mgr
    node_selector_value: enabled
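
#NOTE: the node selectors above only place pods onto nodes carrying the
# matching label; a minimal example of labelling a node so MDS and MGR pods
# can schedule there (the node name is a placeholder):
#   kubectl label node <node-name> ceph-mds=enabled ceph-mgr=enabled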

pod:
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    mds: 2
    mgr: 2
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  resources:
    enabled: false
    mds:
      requests:
        memory: "10Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    mgr:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      bootstrap:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: ceph-client-admin-keyring

network:
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
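
#NOTE: `public` and `cluster` should cover the networks the Ceph daemons
# actually use; if the nodes sit on a different CIDR, override these values,
# for example in a values override file (the CIDR below is an assumption):
#network:
#  public: 10.0.0.0/24
#  cluster: 10.0.0.0/24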

conf:
  features:
    mds: true
    mgr: true
  pool:
  #NOTE(portdirect): this drives a simple approximation of
  # https://ceph.com/pgcalc/. The `target.osd` key should be set to match the
  # expected number of OSDs in the cluster, and `target.pg_per_osd` should be
  # set to the desired number of placement groups per OSD; a worked example
  # follows the `spec` list below.
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel, this should be set to `hammer`
      tunables: null
    target:
      #NOTE(portdirect): we arbitrarily set the default number of expected OSDs
      # to 5 to match the number of nodes in the OSH gate.
      osd: 5
      pg_per_osd: 100
    default:
      #NOTE(portdirect): this should be 'same_host' for a single-node
      # cluster to reach a healthy state
      crush_rule: replicated_rule
    #NOTE(portdirect): this section describes the pools that will be managed by
    # the Ceph pool management job, which tunes their placement groups and
    # CRUSH rule based on the targets above.
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
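    #NOTE: a rough worked example of the approximation described above: with
    # `target.osd: 5` and `target.pg_per_osd: 100`, the `rbd` pool
    # (`replication: 3`, `percent_total_data: 40`) would receive roughly
    # (100 * 5 * 0.40) / 3 ~= 67 placement groups; the pgcalc method this
    # approximates rounds that to a nearby power of two (64).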
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
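
#NOTE: assuming keys under `conf.ceph` are rendered into the matching sections
# of ceph.conf, further options can be supplied the same way; a hypothetical
# example that throttles OSD recovery:
#conf:
#  ceph:
#    osd:
#      osd_recovery_max_active: 1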

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ceph-client-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    cephfs_client_key_generator:
      jobs: null
    cephfs_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    mds:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mds-keyring-generator
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    mgr:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mgr-keyring-generator
      services:
        - endpoint: internal
          service: ceph_mon
    namespace_client_key_cleaner:
      jobs: null
    namespace_client_key_generator:
      jobs: null
    rbd_pool:
      services:
        - endpoint: internal
          service: ceph_mon
    rbd_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

bootstrap:
  enabled: false
  script: |
    ceph -s
    # Create a pool if it does not already exist, then tag it with an
    # application name on Luminous (12.2.x) and later clusters.
    function ensure_pool () {
      ceph osd pool stats "$1" || ceph osd pool create "$1" "$2"
      local test_luminous=$(ceph tell 'osd.*' version | egrep -c "12\.2|luminous" | xargs echo)
      if [[ ${test_luminous} -gt 0 ]]; then
        ceph osd pool application enable "$1" "$3"
      fi
    }
    #ensure_pool volumes 8 cinder
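
#NOTE: the script above is only expected to run when both `bootstrap.enabled`
# and the `manifests.job_bootstrap` toggle further down are set to true (an
# assumption based on the toggles in this file), e.g. at deploy time:
#   --set bootstrap.enabled=true --set manifests.job_bootstrap=true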

# The list below controls which mgr modules are enabled.
# For a list of available modules, see:
#   http://docs.ceph.com/docs/master/mgr/
# This overrides mgr_initial_modules (default: restful, status);
# any module not listed here will be disabled.
ceph_mgr_enabled_modules:
  - restful
  - status
  - prometheus

# Individual mgr modules can be configured below; each module takes its
# own set of key/value pairs. Refer to the documentation linked above
# for the options each module supports. For example:
#ceph_mgr_modules_config:
#  dashboard:
#    port: 7000
#  localpool:
#    failure_domain: host
#    subtree: rack
#    pg_num: "128"
#    num_rep: "3"
#    min_size: "2"

endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
  ceph_mgr:
    namespace: null
    hosts:
      default: ceph-mgr
    host_fqdn_override:
      default: null
    port:
      mgr:
        default: 7000
      metrics:
        default: 9283
    scheme:
      default: http

monitoring:
  prometheus:
    enabled: true
    ceph_mgr:
      scrape: true
      port: 9283

manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_mds: true
  deployment_mgr: true
  job_bootstrap: false
  job_cephfs_client_key: true
  job_image_repo_sync: true
  job_rbd_pool: true
  service_mgr: true