Make OpenStack use Ceph

This implements all of the OpenStack pieces needed to make Ceph work: Glance images, Cinder volumes and backups, and Nova ephemeral disks can now be backed by RBD, and libvirt is configured to allow live migration.

DocImpact
Change-Id: I1d24476a966602cf955e5ef872b0efb01319894a
Partially-Implements: blueprint ceph-container
Implements: blueprint kolla-live-migration
Sam Yaple 2015-09-21 16:33:46 +00:00
parent a1b0518b9a
commit 99c7eb1997
29 changed files with 378 additions and 25 deletions

View File

@@ -163,4 +163,4 @@ haproxy_user: "openstack"
 #################################
 # Cinder - Block Storage options
 #################################
-cinder_volume_driver: "lvm"
+cinder_volume_driver: "{{ 'ceph' if enable_ceph | bool else 'lvm' }}"

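Note: cinder_volume_driver now defaults from enable_ceph instead of being hardcoded. If you want to see how the Jinja2 expression resolves, a quick illustrative ad-hoc run (not part of the commit) is:

    # Prints "ceph" when enable_ceph is truthy, "lvm" otherwise
    ansible localhost -e enable_ceph=yes -m debug \
        -a "msg={{ 'ceph' if enable_ceph | bool else 'lvm' }}"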
View File

@@ -0,0 +1,3 @@
---
dependencies:
  - { role: common, project_yaml: 'cinder.yml' }

View File

@@ -0,0 +1,60 @@
---
- name: Ensuring config directory exists
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    state: "directory"
  with_items:
    - "cinder-volume"
    - "cinder-backup"
  when: inventory_hostname in groups['cinder-volume']

- name: Copying over config(s)
  template:
    src: roles/ceph/templates/ceph.conf.j2
    dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
  with_items:
    - "cinder-volume"
    - "cinder-backup"
  when: inventory_hostname in groups['cinder-volume']

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for cinder
  command: docker exec -it ceph_mon ceph osd pool create volumes 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for cinder-backup
  command: docker exec -it ceph_mon ceph osd pool create backups 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder
  command: docker exec -it ceph_mon ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  register: cephx_key_cinder
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder-backup
  command: docker exec -it ceph_mon ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
  register: cephx_key_cinder_backup
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

- name: Pushing cephx keyring
  copy:
    content: "{{ item.content }}\n\r"
    dest: "{{ node_config_directory }}/{{ item.service_name }}/ceph.client.{{ item.key_name }}.keyring"
    mode: "0600"
  with_items:
    - { service_name: "cinder-volume", key_name: "cinder", content: "{{ cephx_key_cinder.stdout }}" }
    - { service_name: "cinder-backup", key_name: "cinder-backup", content: "{{ cephx_key_cinder_backup.stdout }}" }
  when: inventory_hostname in groups['cinder-volume']

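Note: these tasks lean on changed_when/failed_when: False because `ceph osd pool create` and `ceph auth get-or-create` are safe to re-run (hence the TODOs). A manual spot-check on the first ceph-mon host, assuming the container name ceph_mon used above:

    docker exec ceph_mon ceph osd lspools                    # should list volumes and backups
    docker exec ceph_mon ceph auth get client.cinder         # keyring stanza with the caps above
    docker exec ceph_mon ceph auth get client.cinder-backup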
View File

@@ -1,8 +1,27 @@
 ---
+- include: ceph.yml
+  when: enable_ceph | bool
+
 - include: register.yml
+  when: inventory_hostname in groups['cinder-api'] or
+        inventory_hostname in groups['cinder-volume'] or
+        inventory_hostname in groups['cinder-scheduler'] or
+        inventory_hostname in groups['cinder-backup']
 
 - include: config.yml
+  when: inventory_hostname in groups['cinder-api'] or
+        inventory_hostname in groups['cinder-volume'] or
+        inventory_hostname in groups['cinder-scheduler'] or
+        inventory_hostname in groups['cinder-backup']
 
 - include: bootstrap.yml
+  when: inventory_hostname in groups['cinder-api'] or
+        inventory_hostname in groups['cinder-volume'] or
+        inventory_hostname in groups['cinder-scheduler'] or
+        inventory_hostname in groups['cinder-backup']
 
 - include: start.yml
+  when: inventory_hostname in groups['cinder-api'] or
+        inventory_hostname in groups['cinder-volume'] or
+        inventory_hostname in groups['cinder-scheduler'] or
+        inventory_hostname in groups['cinder-backup']

View File

@@ -6,6 +6,18 @@
             "dest": "/etc/cinder/cinder.conf",
             "owner": "cinder",
             "perm": "0600"
-        }
+        }{% if enable_ceph | bool %},
+        {
+            "source": "/opt/kolla/config_files/ceph.client.cinder.keyring",
+            "dest": "/etc/ceph/ceph.client.cinder.keyring",
+            "owner": "cinder",
+            "perm": "0600"
+        },
+        {
+            "source": "/opt/kolla/config_files/ceph.conf",
+            "dest": "/etc/ceph/ceph.conf",
+            "owner": "cinder",
+            "perm": "0600"
+        }{% endif %}
     ]
 }

View File

@@ -1,12 +1,18 @@
 [DEFAULT]
 verbose = true
 debug = true
 use_syslog = True
 syslog_log_facility = LOG_LOCAL0
 enable_v1_api=false
 volume_name_template = %s
 glance_api_servers = http://{{ kolla_internal_address }}:{{ glance_api_port }}
+glance_api_version = 2
 os_region_name = {{ openstack_region_name }}
 {% if cinder_volume_driver == "lvm" %}
 default_volume_type = lvmdriver-1
 enabled_backends = lvmdriver-1
@@ -14,10 +20,24 @@ enabled_backends = lvmdriver-1
 default_volume_type = rbd-1
 enabled_backends = rbd-1
 {% endif %}
+{% if service_name == "cinder-backup" and cinder_volume_driver == "ceph" %}
+backup_driver = cinder.backup.drivers.ceph
+backup_ceph_conf = /etc/ceph/ceph.conf
+backup_ceph_user = cinder-backup
+backup_ceph_chunk_size = 134217728
+backup_ceph_pool = backups
+backup_ceph_stripe_unit = 0
+backup_ceph_stripe_count = 0
+restore_discard_excess_bytes = true
+{% endif %}
 osapi_volume_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
 osapi_volume_listen_port = {{ cinder_api_port }}
 api_paste_config = /etc/cinder/api-paste.ini
 nova_catalog_info = compute:nova:internalURL
 auth_strategy = keystone
 
 [database]
@@ -52,7 +72,12 @@ volume_backend_name = lvmdriver-1
 
 [rbd-1]
 volume_driver = cinder.volume.drivers.rbd.RBDDriver
 rbd_pool = volumes
+rbd_ceph_conf = /etc/ceph/ceph.conf
+rbd_flatten_volume_from_snapshot = false
+rbd_max_clone_depth = 5
+rbd_store_chunk_size = 4
+rados_connect_timeout = -1
 rbd_user = cinder
-rbd_secret_uuid = {{ metadata_secret }}
+rbd_secret_uuid = {{ rbd_secret_uuid }}
 {% endif %}

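Note: because volume_name_template = %s, each Cinder volume shows up in the volumes pool as an RBD image named by its bare UUID. A hedged spot-check from inside the volume container (container name assumed from the deploy, not this diff):

    docker exec cinder_volume rbd -p volumes ls --id cinder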
View File

@@ -2,4 +2,3 @@
 - include: config.yml
 - include: start.yml

View File

@@ -1,3 +1,3 @@
 ---
 dependencies:
-  - { role: common }
+  - { role: common, project_yaml: 'glance.yml' }

View File

@@ -0,0 +1,35 @@
---
- name: Ensuring config directory exists
  file:
    path: "{{ node_config_directory }}/glance-api"
    state: "directory"
  when: inventory_hostname in groups['glance-api']

- name: Copying over config(s)
  template:
    src: roles/ceph/templates/ceph.conf.j2
    dest: "{{ node_config_directory }}/glance-api/ceph.conf"
  when: inventory_hostname in groups['glance-api']

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for glance
  command: docker exec -it ceph_mon ceph osd pool create images 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring
  command: docker exec -it ceph_mon ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
  register: cephx_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

- name: Pushing cephx keyring
  copy:
    content: "{{ cephx_key.stdout }}\n\r"
    dest: "{{ node_config_directory }}/glance-api/ceph.client.glance.keyring"
    mode: "0600"
  when: inventory_hostname in groups['glance-api']

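Note: client.glance is scoped to rwx on the images pool only, unlike client.cinder and client.nova, which also touch volumes and vms. To confirm the caps landed as requested (sketch):

    docker exec ceph_mon ceph auth get client.glance
    # expect: caps osd = "allow class-read object_prefix rbd_children, allow rwx pool=images"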
View File

@@ -1,8 +1,19 @@
 ---
+- include: ceph.yml
+  when: enable_ceph | bool
+
 - include: register.yml
+  when: inventory_hostname in groups['glance-api'] or
+        inventory_hostname in groups['glance-registry']
 
 - include: config.yml
+  when: inventory_hostname in groups['glance-api'] or
+        inventory_hostname in groups['glance-registry']
 
 - include: bootstrap.yml
+  when: inventory_hostname in groups['glance-api'] or
+        inventory_hostname in groups['glance-registry']
 
 - include: start.yml
+  when: inventory_hostname in groups['glance-api'] or
+        inventory_hostname in groups['glance-registry']

View File

@@ -12,6 +12,10 @@ registry_host = {{ kolla_internal_address }}
 use_syslog = True
 syslog_log_facility = LOG_LOCAL0
 
+{% if enable_ceph | bool %}
+show_image_direct_url= True
+{% endif %}
+
 [database]
 connection = mysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
@@ -29,5 +33,13 @@ password = {{ glance_keystone_password }}
 flavor = keystone
 
 [glance_store]
+{% if enable_ceph | bool %}
+default_store = rbd
+stores = rbd
+rbd_store_user = glance
+rbd_store_pool = images
+rbd_store_chunk_size = 8
+{% else %}
 default_store = file
 filesystem_store_datadir = /var/lib/glance/images/
+{% endif %}

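Note: show_image_direct_url is what exposes the RBD location of images so Cinder and Nova can make copy-on-write clones instead of full copies. A hypothetical end-to-end check after deploy (image name and file are illustrative):

    glance image-create --name cirros --disk-format qcow2 \
        --container-format bare --file cirros-0.3.4-x86_64-disk.img
    docker exec ceph_mon rbd -p images ls    # the new image's UUID should appear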
View File

@@ -6,6 +6,18 @@
             "dest": "/etc/glance/glance-api.conf",
             "owner": "glance",
             "perm": "0600"
-        }
+        }{% if enable_ceph | bool %},
+        {
+            "source": "/opt/kolla/config_files/ceph.client.glance.keyring",
+            "dest": "/etc/ceph/ceph.client.glance.keyring",
+            "owner": "glance",
+            "perm": "0600"
+        },
+        {
+            "source": "/opt/kolla/config_files/ceph.conf",
+            "dest": "/etc/ceph/ceph.conf",
+            "owner": "glance",
+            "perm": "0600"
+        }{% endif %}
     ]
 }

View File

@@ -1,3 +1,3 @@
 ---
 dependencies:
-  - { role: common }
+  - { role: common, project_yaml: 'nova.yml' }

View File

@@ -0,0 +1,63 @@
---
- name: Ensuring config directory exists
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    state: "directory"
  with_items:
    - "nova-compute"
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']

- name: Copying over config(s)
  template:
    src: roles/ceph/templates/ceph.conf.j2
    dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
  with_items:
    - "nova-compute"
    - "nova-libvirt"
  when: inventory_hostname in groups['compute']

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for vms
  command: docker exec -it ceph_mon ceph osd pool create vms 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for nova
  command: docker exec -it ceph_mon ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  register: cephx_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for libvirt
  command: docker exec -it ceph_mon ceph auth get-key client.nova
  register: cephx_raw_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

- name: Pushing cephx keyring for nova
  copy:
    content: "{{ cephx_key.stdout }}\n\r"
    dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
    mode: "0600"
  when: inventory_hostname in groups['compute']

- name: Pushing secrets xml for libvirt
  template:
    src: roles/nova/templates/secret.xml.j2
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.xml"
    mode: "0600"
  when: inventory_hostname in groups['compute']

- name: Pushing secrets key for libvirt
  copy:
    content: "{{ cephx_raw_key.stdout }}"
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.base64"
    mode: "0600"
  when: inventory_hostname in groups['compute']

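Note: the two keyring tasks differ deliberately: `ceph auth get` emits a full keyring stanza for /etc/ceph, while `ceph auth get-key` emits only the bare base64 key, which is what the libvirt secret file wants:

    docker exec ceph_mon ceph auth get client.nova        # [client.nova] section: key plus caps
    docker exec ceph_mon ceph auth get-key client.nova    # bare base64 key only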
View File

@@ -129,6 +129,12 @@
     dest: "{{ node_config_directory }}/nova-compute/config.json"
   when: inventory_hostname in groups['compute']
 
+- name: Copying over config(s)
+  template:
+    src: "libvirtd.conf.j2"
+    dest: "{{ node_config_directory }}/nova-libvirt/libvirtd.conf"
+  when: inventory_hostname in groups['compute']
+
 - include: ../../config.yml
   vars:
     service_name: "nova-novncproxy"

View File

@@ -1,8 +1,31 @@
 ---
+- include: ceph.yml
+  when: enable_ceph | bool
+
 - include: register.yml
+  when: inventory_hostname in groups['nova-api'] or
+        inventory_hostname in groups['nova-consoleauth'] or
+        inventory_hostname in groups['nova-novncproxy'] or
+        inventory_hostname in groups['nova-scheduler'] or
+        inventory_hostname in groups['nova-compute']
 
 - include: config.yml
+  when: inventory_hostname in groups['nova-api'] or
+        inventory_hostname in groups['nova-consoleauth'] or
+        inventory_hostname in groups['nova-novncproxy'] or
+        inventory_hostname in groups['nova-scheduler'] or
+        inventory_hostname in groups['nova-compute']
 
 - include: bootstrap.yml
+  when: inventory_hostname in groups['nova-api'] or
+        inventory_hostname in groups['nova-consoleauth'] or
+        inventory_hostname in groups['nova-novncproxy'] or
+        inventory_hostname in groups['nova-scheduler'] or
+        inventory_hostname in groups['nova-compute']
 
 - include: start.yml
+  when: inventory_hostname in groups['nova-api'] or
+        inventory_hostname in groups['nova-consoleauth'] or
+        inventory_hostname in groups['nova-novncproxy'] or
+        inventory_hostname in groups['nova-scheduler'] or
+        inventory_hostname in groups['nova-compute']

View File

@@ -19,6 +19,7 @@
       - "{{ node_config_directory }}/nova-libvirt/:/opt/kolla/config_files/:ro"
       - "/run:/run"
       - "/sys/fs/cgroup:/sys/fs/cgroup"
+      - "/lib/modules:/lib/modules:ro"
     volumes_from:
       - nova_data
     env:

View File

@@ -0,0 +1,11 @@
listen_tcp = 1
auth_tcp = "none"
ca_file = ""
log_level = 2
log_outputs = "2:file:/var/log/libvirt/libvirtd.log"
listen_addr = "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
unix_sock_group = "root"
unix_sock_ro_perms = "0777"
unix_sock_rw_perms = "0770"
auth_unix_ro = "none"
auth_unix_rw = "none"

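Note: listen_tcp = 1 with auth_tcp = "none" means the tunnelled live migration configured below runs over unauthenticated TCP, so the api_interface network must be trusted. A hedged connectivity check between compute hosts (hostname illustrative):

    virsh -c qemu+tcp://compute01/system hostname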
View File

@@ -6,6 +6,18 @@
             "dest": "/etc/nova/nova.conf",
             "owner": "nova",
             "perm": "0600"
-        }
+        }{% if enable_ceph | bool %},
+        {
+            "source": "/opt/kolla/config_files/ceph.client.nova.keyring",
+            "dest": "/etc/ceph/ceph.client.nova.keyring",
+            "owner": "nova",
+            "perm": "0600"
+        },
+        {
+            "source": "/opt/kolla/config_files/ceph.conf",
+            "dest": "/etc/ceph/ceph.conf",
+            "owner": "nova",
+            "perm": "0600"
+        }{% endif %}
     ]
 }

View File

@@ -1,4 +1,23 @@
 {
-    "command": "/usr/sbin/libvirtd",
-    "config_files": []
+    "command": "/usr/sbin/libvirtd -l",
+    "config_files": [
+        {
+            "source": "/opt/kolla/config_files/libvirt.conf",
+            "dest": "/etc/libvirt/libvirtd.conf",
+            "owner": "root",
+            "perm": "0600"
+        }{% if enable_ceph | bool %},
+        {
+            "source": "/opt/kolla/config_files/secrets",
+            "dest": "/etc/libvirt/secrets",
+            "owner": "root",
+            "perm": "0600"
+        },
+        {
+            "source": "/opt/kolla/config_files/ceph.conf",
+            "dest": "/etc/ceph/ceph.conf",
+            "owner": "root",
+            "perm": "0600"
+        }{% endif %}
+    ]
 }

View File

@@ -87,3 +87,15 @@ user_domain_id = default
 project_name = service
 username = nova
 password = {{ nova_keystone_password }}
+
+{% if enable_ceph | bool %}
+[libvirt]
+images_type = rbd
+images_rbd_pool = vms
+images_rbd_ceph_conf = /etc/ceph/ceph.conf
+rbd_user = nova
+rbd_secret_uuid = {{ rbd_secret_uuid }}
+disk_cachemodes="network=writeback"
+live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
+hw_disk_discard = unmap
+{% endif %}

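Note: images_type = rbd puts ephemeral disks straight into the vms pool (as <instance_uuid>_disk), which is what lets live migration move only instance state rather than disk contents. After booting a test instance (sketch):

    docker exec ceph_mon rbd -p vms ls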
View File

@@ -0,0 +1,6 @@
<secret ephemeral='no' private='no'>
  <uuid>{{ rbd_secret_uuid }}</uuid>
  <usage type='ceph'>
    <name>client.nova secret</name>
  </usage>
</secret>

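Note: libvirt loads secret definitions from /etc/libvirt/secrets natively, so pushing the <uuid>.xml / <uuid>.base64 pair there is equivalent to the usual manual steps (shown for reference; $UUID stands in for rbd_secret_uuid):

    virsh secret-define --file /etc/libvirt/secrets/$UUID.xml
    virsh secret-set-value --secret $UUID \
        --base64 "$(cat /etc/libvirt/secrets/$UUID.base64)"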
View File

@@ -24,11 +24,11 @@
   roles:
     - { role: swift, tags: swift, when: enable_swift | bool }
 
-- hosts: [glance-api, glance-registry]
+- hosts: [glance-api, glance-registry, ceph-mon]
   roles:
     - { role: glance, tags: glance, when: enable_glance | bool }
 
-- hosts: [nova-api, nova-conductor, nova-consoleauth, nova-novncproxy, nova-scheduler, compute]
+- hosts: [nova-api, nova-conductor, nova-consoleauth, nova-novncproxy, nova-scheduler, compute, ceph-mon]
   roles:
     - { role: nova, tags: nova, when: enable_nova | bool }

@@ -36,7 +36,7 @@
   roles:
     - { role: neutron, tags: neutron, when: enable_neutron | bool }
 
-- hosts: [cinder-api, cinder-backup, cinder-scheduler, cinder-volume]
+- hosts: [cinder-api, cinder-backup, cinder-scheduler, cinder-volume, ceph-mon]
   roles:
     - { role: cinder, tags: cinder, when: enable_cinder | bool }

View File

@@ -28,8 +28,11 @@ RUN yum -y install lvm2 \
 {% elif base_distro in ['ubuntu', 'debian'] %}
-RUN apt-get install -y --no-install-recommends lvm2 \
-    && apt-get clean
+RUN apt-get install -y --no-install-recommends \
+        lvm2 \
+        ceph-common \
+    && apt-get clean \
+    && mkdir -p /etc/ceph
 {% endif %}

View File

@@ -19,6 +19,15 @@ RUN echo '{{ install_type }} not yet available for {{ base_distro }}' \
 {% endif %}
 
 {% elif install_type == 'source' %}
+{% if base_distro in ['ubuntu', 'debian'] %}
+RUN apt-get install -y --no-install-recommends \
+        python-rbd \
+        python-rados \
+    && apt-get clean \
+    && mkdir -p /etc/ceph/
+{% endif %}
+
 ADD glance-base-archive /glance-base-source
 RUN ln -s glance-base-source/* glance \

View File

@@ -22,8 +22,11 @@ RUN yum -y install \
 {% elif base_distro in ['ubuntu', 'debian'] %}
-RUN apt-get install -y --no-install-recommends qemu-utils \
-    && apt-get clean
+RUN apt-get install -y --no-install-recommends \
+        qemu-utils \
+        ceph-common \
+    && apt-get clean \
+    && mkdir -p /etc/ceph
 {% endif %}
 {% endif %}

View File

@@ -15,12 +15,15 @@ RUN yum -y install \
 {% elif base_distro in ['ubuntu', 'debian'] %}
 RUN apt-get install -y --no-install-recommends \
+        ceph-common \
         libvirt-bin \
         dmidecode \
         pm-utils \
         qemu \
+        qemu-block-extra \
         ebtables \
-    && apt-get clean
+    && apt-get clean \
+    && mkdir -p /etc/ceph
 {% endif %}

View File

@@ -8,12 +8,5 @@ source /opt/kolla/kolla-common.sh
 python /opt/kolla/set_configs.py
 CMD=$(cat /run_command)
 
-# TODO(SamYaple): Tweak libvirt.conf rather than change permissions.
-# Fix permissions for libvirt
-if [[ -c /dev/kvm ]]; then
-    chmod 660 /dev/kvm
-    chown root:kvm /dev/kvm
-fi
-
 echo "Running command: ${CMD}"
 exec $CMD

View File

@@ -7,6 +7,7 @@
 # Ceph options
 ####################
 ceph_cluster_fsid: "5fba2fbc-551d-11e5-a8ce-01ef4c5cf93c"
+rbd_secret_uuid: "bbc5b4d5-6fca-407d-807d-06a4f4a7bccb"
 
 ###################
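Note: as with ceph_cluster_fsid above it, the shipped rbd_secret_uuid is only a sample value; generate a unique one per deployment so libvirt secrets are not shared across unrelated clusters:

    uuidgen    # paste the output over the default in your globals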