6496cfc0ba
In a deployment that has both Ceph and Swift deployed, it can be useful to
separate the network traffic. This change adds support for dedicated storage
networks for both Ceph and Swift. By default, the storage hosts are attached
to the following networks:

* Overcloud admin network
* Internal network
* Storage network
* Storage management network

This change adds four additional networks, which can be used to separate the
storage network traffic as follows:

* Ceph storage network (ceph_storage_net_name) is used to carry Ceph storage
  data traffic. Defaults to the storage network (storage_net_name).
* Ceph storage management network (ceph_storage_mgmt_net_name) is used to
  carry Ceph storage management traffic. Defaults to the storage management
  network (storage_mgmt_net_name).
* Swift storage network (swift_storage_net_name) is used to carry Swift
  storage data traffic. Defaults to the storage network (storage_net_name).
* Swift storage replication network (swift_storage_replication_net_name) is
  used to carry Swift storage replication traffic. Defaults to the storage
  management network (storage_mgmt_net_name).

This change also includes several improvements to Swift device management and
ring generation. Device management and ring generation are now separate, with
device management occurring during 'kayobe overcloud host configure', and
ring generation during a new command, 'kayobe overcloud swift rings generate'.

For device management, we now use standard Ansible modules rather than
commands for device preparation. File system labels can be configured for
each device individually.

For ring generation, all commands are run on a single host, by default a host
in the Swift storage group. A Python script runs in one of the kolla Swift
containers, consuming an autogenerated YAML config file that defines the
layout of the rings.

Change-Id: Iedc7535532d706f02d710de69b422abf2f6fe54c
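Since the new network names are ordinary kayobe configuration variables,
dedicating networks to Ceph and Swift traffic is a matter of pointing them at
other defined networks. A minimal sketch, assuming the usual
etc/kayobe/networks.yml layout; the network names on the right-hand side
(ceph_storage_net and friends) are illustrative placeholders, not defaults:

    ---
    # etc/kayobe/networks.yml (sketch; network names are placeholders)
    # Carry Ceph data and replication traffic on dedicated networks.
    ceph_storage_net_name: ceph_storage_net
    ceph_storage_mgmt_net_name: ceph_storage_mgmt_net
    # Carry Swift data and replication traffic on dedicated networks.
    swift_storage_net_name: swift_storage_net
    swift_storage_replication_net_name: swift_storage_replication_net

Leaving any of these unset preserves the defaults described above, so
existing deployments are unaffected. The ring layout itself is described by
the autogenerated YAML consumed by the script below.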
#!/usr/bin/env python

"""
Script to build a Swift ring from a declarative YAML configuration. This is
implemented as a single script to avoid the overhead of repeated
'docker exec' commands, which could take a long time.

Usage:

    python swift-ring-builder.py <config file path> <build path> <service name>

Example:

    python swift-ring-builder.py /path/to/config.yml /path/to/builds object

Example configuration format:

    ---
    part_power: 10
    replication_count: 3
    min_part_hours: 1
    hosts:
      - host: swift1
        region: 1
        zone: 1
        ip: 10.0.0.1
        port: 6001
        replication_ip: 10.1.0.1
        replication_port: 6001
        devices:
          - device: /dev/sdb
            weight: 100
          - device: /dev/sdc
            weight: 100
"""

from __future__ import print_function

import subprocess
import sys

import yaml


class RingBuilder(object):
    """Helper class for building Swift rings."""

    def __init__(self, build_path, service_name):
        self.build_path = build_path
        self.service_name = service_name

    def get_base_command(self):
        """Return the common swift-ring-builder command prefix."""
        return [
            'swift-ring-builder',
            '%s/%s.builder' % (self.build_path, self.service_name),
        ]

    def create(self, part_power, replication_count, min_part_hours):
        """Create an empty ring."""
        cmd = self.get_base_command()
        cmd += [
            'create',
            "{}".format(part_power),
            "{}".format(replication_count),
            "{}".format(min_part_hours),
        ]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print("Failed to create %s ring" % self.service_name)
            sys.exit(1)

    def add_device(self, host, device):
        """Add a device on a host to the ring."""
        cmd = self.get_base_command()
        cmd += [
            'add',
            '--region', "{}".format(host['region']),
            '--zone', "{}".format(host['zone']),
            '--ip', host['ip'],
            '--port', "{}".format(host['port']),
            '--replication-ip', host['replication_ip'],
            '--replication-port', "{}".format(host['replication_port']),
            '--device', device['device'],
            '--weight', "{}".format(device['weight']),
        ]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print("Failed to add device %s on host %s to %s ring" %
                  (device['device'], host['host'], self.service_name))
            sys.exit(1)

    def rebalance(self):
        """Rebalance the partitions in the ring across devices."""
        cmd = self.get_base_command()
        cmd += [
            'rebalance',
        ]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print("Failed to rebalance %s ring" % self.service_name)
            sys.exit(1)


def build_rings(config, build_path, service_name):
    """Create, populate and rebalance a ring from the loaded configuration."""
    builder = RingBuilder(build_path, service_name)
    builder.create(config['part_power'], config['replication_count'],
                   config['min_part_hours'])
    for host in config['hosts']:
        devices = host['devices']
        # If no devices are present for this host, this will be None.
        if devices is None:
            continue
        for device in devices:
            builder.add_device(host, device)
    builder.rebalance()


def main():
    if len(sys.argv) != 4:
        raise Exception("Usage: {0} <config file path> <build path> "
                        "<service name>".format(sys.argv[0]))
    config_path = sys.argv[1]
    build_path = sys.argv[2]
    service_name = sys.argv[3]
    with open(config_path) as f:
        # safe_load avoids executing arbitrary YAML tags in the config file.
        config = yaml.safe_load(f)
    build_rings(config, build_path, service_name)


if __name__ == "__main__":
    main()
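For reference, 'kayobe overcloud swift rings generate' drives this script
from Ansible via 'docker exec' in one of the kolla Swift containers, so a
manual invocation would look roughly like the following. The container name
swift_object_server and the paths are assumptions for illustration; the
actual names depend on the deployment:

    docker exec swift_object_server \
        python /path/to/swift-ring-builder.py \
        /path/to/config.yml /path/to/builds object

The script would be run once per ring, with the service name argument
selecting which ring (for Swift, typically account, container or object) to
create and rebalance.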