Update conf option tables for cinder

This re-runs the autodoc tool to generate new flagmappings for
cinder from the recent trunk. It starts to separate out
the 'storage' category into various driver components.

Change-Id: Iaa8770001fcb182403ce9f1b756d4732d06bc3f6
Author: Tom Fifield
Date: 2013-10-07 11:14:21 +00:00
parent f2d1270417
commit bb680dfdc7
40 changed files with 1034 additions and 639 deletions

View File

@ -47,14 +47,14 @@
<td>osapi_volume_base_URL=None</td>
<td>(StrOpt) Base URL that will be presented to users in links to the OpenStack Volume API</td>
</tr>
<tr>
<td>osapi_volume_ext_list=</td>
<td>(ListOpt) Specify list of extensions to load when using osapi_volume_extension option with cinder.api.contrib.select_extensions</td>
</tr>
<tr>
<td>osapi_volume_extension=['cinder.api.contrib.standard_extensions']</td>
<td>(MultiStrOpt) osapi volume extension to load</td>
</tr>
<tr>
<td>osapi_volume_ext_list=</td>
<td>(ListOpt) Specify list of extensions to load when using osapi_volume_extension option with cinder.api.contrib.select_extensions</td>
</tr>
<tr>
<td>transfer_api_class=cinder.transfer.api.API</td>
<td>(StrOpt) The full class name of the volume transfer API class</td>
@ -63,6 +63,18 @@
<td>volume_api_class=cinder.volume.api.API</td>
<td>(StrOpt) The full class name of the volume API class to use</td>
</tr>
<tr>
<td>volume_name_template=volume-%s</td>
<td>(StrOpt) Template string to be used to generate volume names</td>
</tr>
<tr>
<td>volume_transfer_key_length=16</td>
<td>(IntOpt) The number of characters in the autogenerated auth key.</td>
</tr>
<tr>
<td>volume_transfer_salt_length=8</td>
<td>(IntOpt) The number of characters in the salt.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -19,6 +19,10 @@
<td>backup_api_class=cinder.backup.api.API</td>
<td>(StrOpt) The full class name of the volume backup API class</td>
</tr>
<tr>
<td>backup_compression_algorithm=zlib</td>
<td>(StrOpt) Compression algorithm (None to disable)</td>
</tr>
<tr>
<td>backup_driver=cinder.backup.drivers.swift</td>
<td>(StrOpt) Driver to use for backups.</td>
@ -36,8 +40,12 @@
<td>(StrOpt) the topic volume backup nodes listen on</td>
</tr>
<tr>
<td>restore_discard_excess_bytes=True</td>
<td>(BoolOpt) If True, always discard excess bytes when restoring volumes.</td>
<td>snapshot_name_template=snapshot-%s</td>
<td>(StrOpt) Template string to be used to generate snapshot names</td>
</tr>
<tr>
<td>snapshot_same_host=True</td>
<td>(BoolOpt) Create volume from snapshot at the host where snapshot resides</td>
</tr>
</tbody>
</table>

View File

@ -39,6 +39,10 @@
<td>backup_ceph_user=cinder</td>
<td>(StrOpt) the Ceph user to connect with</td>
</tr>
<tr>
<td>restore_discard_excess_bytes=True</td>
<td>(BoolOpt) If True, always discard excess bytes when restoring volumes.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -15,10 +15,6 @@
</tr>
</thead>
<tbody>
<tr>
<td>backup_compression_algorithm=zlib</td>
<td>(StrOpt) Compression algorithm (None to disable)</td>
</tr>
<tr>
<td>backup_swift_auth=per_user</td>
<td>(StrOpt) Swift authentication mechanism</td>

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for block-device</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>available_devices=</td>
<td>(ListOpt) List of all available devices</td>
</tr>
</tbody>
</table>
</para>
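
For orientation only (not part of this commit), the new block-device option would be consumed from cinder.conf roughly as follows; the driver path and device names are assumptions, not taken from the diff:

volume_driver=cinder.volume.drivers.block_device.BlockDeviceDriver   # assumed driver path, not shown in the diff
available_devices=/dev/sdb,/dev/sdc                                   # placeholder device list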

View File

@ -60,7 +60,7 @@
<td>(BoolOpt) make exception message format errors fatal</td>
</tr>
<tr>
<td>host=autodoc</td>
<td>host=docwork</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.</td>
</tr>
<tr>
@ -68,7 +68,7 @@
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>host=autodoc</td>
<td>host=docwork</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.</td>
</tr>
<tr>
@ -85,7 +85,7 @@
</tr>
<tr>
<td>lio_initiator_iqns=</td>
<td>(StrOpt) Comma-separatd list of initiator IQNs allowed to connect to the iSCSI target. (From Nova compute nodes.)</td>
<td>(StrOpt) Comma-separated list of initiator IQNs allowed to connect to the iSCSI target. (From Nova compute nodes.)</td>
</tr>
<tr>
<td>lock_path=None</td>
@ -136,45 +136,17 @@
<td>(ListOpt) List of modules/decorators to monkey patch</td>
</tr>
<tr>
<td>my_ip=192.168.122.175</td>
<td>my_ip=115.146.84.189</td>
<td>(StrOpt) ip address of this host</td>
</tr>
<tr>
<td>no_snapshot_gb_quota=False</td>
<td>(BoolOpt) Whether snapshots count against GigaByte quota</td>
</tr>
<tr>
<td>nova_api_insecure=False</td>
<td>(BoolOpt) Allow to perform insecure SSL requests to nova</td>
</tr>
<tr>
<td>nova_ca_certificates_file=None</td>
<td>(StrOpt) Location of ca certicates file to use for nova client requests.</td>
</tr>
<tr>
<td>nova_catalog_admin_info=compute:nova:adminURL</td>
<td>(StrOpt) Same as nova_catalog_info, but for admin endpoint.</td>
</tr>
<tr>
<td>nova_catalog_info=compute:nova:publicURL</td>
<td>(StrOpt) Info to match when looking for nova in the service catalog. Format is : separated values of the form: &lt;service_type&gt;:&lt;service_name&gt;:&lt;endpoint_type&gt;</td>
</tr>
<tr>
<td>nova_endpoint_admin_template=None</td>
<td>(StrOpt) Same as nova_endpoint_template, but for admin endpoint.</td>
</tr>
<tr>
<td>nova_endpoint_template=None</td>
<td>(StrOpt) Override service catalog lookup with template for nova endpoint e.g. http://localhost:8774/v2/%(tenant_id)s</td>
</tr>
<tr>
<td>num_shell_tries=3</td>
<td>(IntOpt) number of times to attempt to run flakey shell commands</td>
</tr>
<tr>
<td>os_region_name=None</td>
<td>(StrOpt) region name of this node</td>
</tr>
<tr>
<td>password=None</td>
<td>(StrOpt) Password for Redis server. (optional)</td>
@ -187,38 +159,14 @@
<td>policy_file=policy.json</td>
<td>(StrOpt) JSON file representing policy</td>
</tr>
<tr>
<td>pool_size=None</td>
<td>(StrOpt) Size of thin provisioning pool (None uses entire cinder VG)</td>
</tr>
<tr>
<td>port=6379</td>
<td>(IntOpt) Use this port to connect to redis host.</td>
</tr>
<tr>
<td>pybasedir=/usr/lib/python2.7/site-packages</td>
<td>pybasedir=/home/ubuntu/cinder</td>
<td>(StrOpt) Directory where the cinder python module is installed</td>
</tr>
<tr>
<td>quota_driver=cinder.quota.DbQuotaDriver</td>
<td>(StrOpt) default driver to use for quota checks</td>
</tr>
<tr>
<td>quota_gigabytes=1000</td>
<td>(IntOpt) number of volume gigabytes (snapshots are also included) allowed per project</td>
</tr>
<tr>
<td>quota_snapshots=10</td>
<td>(IntOpt) number of volume snapshots allowed per project</td>
</tr>
<tr>
<td>quota_volumes=10</td>
<td>(IntOpt) number of volumes allowed per project</td>
</tr>
<tr>
<td>reservation_expire=86400</td>
<td>(IntOpt) number of seconds until a reservation expires</td>
</tr>
<tr>
<td>reserved_percentage=0</td>
<td>(IntOpt) The percentage of backend capacity is reserved</td>
@ -251,18 +199,6 @@
<td>sqlite_synchronous=True</td>
<td>(BoolOpt) If true, use synchronous mode for sqlite</td>
</tr>
<tr>
<td>ssh_conn_timeout=30</td>
<td>(IntOpt) SSH connection timeout in seconds</td>
</tr>
<tr>
<td>ssh_max_pool_conn=5</td>
<td>(IntOpt) Maximum ssh connections in the pool</td>
</tr>
<tr>
<td>ssh_min_pool_conn=1</td>
<td>(IntOpt) Minimum ssh connections in the pool</td>
</tr>
<tr>
<td>ssl_ca_file=None</td>
<td>(StrOpt) CA certificate file to use to verify connecting clients</td>
@ -299,10 +235,6 @@
<td>until_refresh=0</td>
<td>(IntOpt) count of reservations until usage is refreshed</td>
</tr>
<tr>
<td>use_default_quota_class=True</td>
<td>(BoolOpt) whether to use default quota class for default quota</td>
</tr>
<tr>
<td>use_forwarded_for=False</td>
<td>(BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.</td>

View File

@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for compute</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>nova_api_insecure=False</td>
<td>(BoolOpt) Allow to perform insecure SSL requests to nova</td>
</tr>
<tr>
<td>nova_ca_certificates_file=None</td>
<td>(StrOpt) Location of ca certicates file to use for nova client requests.</td>
</tr>
<tr>
<td>nova_catalog_admin_info=compute:nova:adminURL</td>
<td>(StrOpt) Same as nova_catalog_info, but for admin endpoint.</td>
</tr>
<tr>
<td>nova_catalog_info=compute:nova:publicURL</td>
<td>(StrOpt) Info to match when looking for nova in the service catalog. Format is : separated values of the form: &lt;service_type&gt;:&lt;service_name&gt;:&lt;endpoint_type&gt;</td>
</tr>
<tr>
<td>nova_endpoint_admin_template=None</td>
<td>(StrOpt) Same as nova_endpoint_template, but for admin endpoint.</td>
</tr>
<tr>
<td>nova_endpoint_template=None</td>
<td>(StrOpt) Override service catalog lookup with template for nova endpoint e.g. http://localhost:8774/v2/%(tenant_id)s</td>
</tr>
<tr>
<td>os_region_name=None</td>
<td>(StrOpt) region name of this node</td>
</tr>
</tbody>
</table>
</para>
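
A minimal sketch of how these compute options would look in cinder.conf (illustrative only; the values shown are simply the documented defaults, plus a placeholder region):

nova_catalog_info=compute:nova:publicURL
nova_catalog_admin_info=compute:nova:adminURL
nova_api_insecure=False
os_region_name=RegionOne   # placeholder; the documented default is None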

View File

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for coraid</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>coraid_group=admin</td>
<td>(StrOpt) Name of group on Coraid ESM to which coraid_user belongs (must have admin privilege)</td>
</tr>
<tr>
<td>coraid_password=password</td>
<td>(StrOpt) Password to connect to Coraid ESM</td>
</tr>
<tr>
<td>coraid_repository_key=coraid_repository</td>
<td>(StrOpt) Volume Type key name to store ESM Repository Name</td>
</tr>
<tr>
<td>coraid_user=admin</td>
<td>(StrOpt) User name to connect to Coraid ESM</td>
</tr>
</tbody>
</table>
</para>
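
Illustrative cinder.conf excerpt for a Coraid backend (not part of the commit; the ESM address is a placeholder, and coraid_esm_address itself is documented in the storage table later in this commit):

coraid_esm_address=192.0.2.40             # placeholder ESM address
coraid_user=admin
coraid_group=admin
coraid_password=password                  # replace with a real password
coraid_repository_key=coraid_repository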

View File

@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for eqlx</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>eqlx_chap_login=admin</td>
<td>(StrOpt) Existing CHAP account name</td>
</tr>
<tr>
<td>eqlx_chap_password=password</td>
<td>(StrOpt) Password for specified CHAP account name</td>
</tr>
<tr>
<td>eqlx_cli_max_retries=5</td>
<td>(IntOpt) Maximum retry count for reconnection</td>
</tr>
<tr>
<td>eqlx_cli_timeout=30</td>
<td>(IntOpt) Timeout for the Group Manager cli command execution</td>
</tr>
<tr>
<td>eqlx_group_name=group-0</td>
<td>(StrOpt) Group name to use for creating volumes</td>
</tr>
<tr>
<td>eqlx_pool=default</td>
<td>(StrOpt) Pool in which volumes will be created</td>
</tr>
<tr>
<td>eqlx_use_chap=False</td>
<td>(BoolOpt) Use CHAP authentificaion for targets?</td>
</tr>
</tbody>
</table>
</para>
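
Hypothetical cinder.conf excerpt for a Dell EqualLogic backend (illustration only; the group address and credentials are placeholders, and the san_* options come from the san table later in this commit):

san_ip=192.0.2.10           # placeholder group manager address
san_login=grpadmin          # placeholder
san_password=password       # placeholder
eqlx_group_name=group-0
eqlx_pool=default
eqlx_use_chap=False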

View File

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for gpfs_volume</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>gpfs_images_dir=None</td>
<td>(StrOpt) Path to GPFS Glance repository as mounted on Nova nodes</td>
</tr>
<tr>
<td>gpfs_images_share_mode=None</td>
<td>(StrOpt) Set this if Glance image repo is on GPFS as well so that the image bits can be transferred efficiently between Glance and Cinder. Valid values are copy or copy_on_write. copy performs a full copy of the image, copy_on_write efficiently shares unmodified blocks of the image.</td>
</tr>
<tr>
<td>gpfs_max_clone_depth=0</td>
<td>(IntOpt) A lengthy chain of copy-on-write snapshots or clones could have impact on performance. This option limits the number of indirections required to reach a specific block. 0 indicates unlimited.</td>
</tr>
<tr>
<td>gpfs_mount_point_base=None</td>
<td>(StrOpt) Path to the directory on GPFS mount point where volumes are stored</td>
</tr>
<tr>
<td>gpfs_sparse_volumes=True</td>
<td>(BoolOpt) Create volumes as sparse files which take no space. If set to False volume is created as regular file. In this case volume creation may take a significantly longer time.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for hds</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml</td>
<td>(StrOpt) configuration file for HDS cinder plugin for HUS</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for huawei</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml</td>
<td>(StrOpt) config data for cinder huawei plugin</td>
</tr>
</tbody>
</table>
</para>

View File

@ -5,7 +5,7 @@
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for backup</caption>
<caption>Description of configuration options for keymgr</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
@ -16,12 +16,12 @@
</thead>
<tbody>
<tr>
<td>snapshot_name_template=snapshot-%s</td>
<td>(StrOpt) Template string to be used to generate snapshot names</td>
<td>api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager</td>
<td>(StrOpt) The full class name of the key manager API class</td>
</tr>
<tr>
<td>snapshot_same_host=True</td>
<td>(BoolOpt) Create volume from snapshot at the host where snapshot resides</td>
<td>fixed_key=None</td>
<td>(StrOpt) Fixed key returned by key manager, specified in hex</td>
</tr>
</tbody>
</table>

View File

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for lvm</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>lvm_mirrors=0</td>
<td>(IntOpt) If set, create lvms with multiple mirrors. Note that this requires lvm_mirrors + 2 pvs with available space</td>
</tr>
<tr>
<td>lvm_type=default</td>
<td>(StrOpt) Type of LVM volumes to deploy; (default or thin)</td>
</tr>
<tr>
<td>pool_size=None</td>
<td>(StrOpt) Size of thin provisioning pool (None uses entire cinder VG)</td>
</tr>
<tr>
<td>volume_group=cinder-volumes</td>
<td>(StrOpt) Name for the VG that will contain exported volumes</td>
</tr>
</tbody>
</table>
</para>
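
For reference (not part of the diff), the LVM options map onto the default backend configuration in cinder.conf, using the documented default driver:

volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
volume_group=cinder-volumes
lvm_type=default            # or thin
lvm_mirrors=0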

View File

@ -0,0 +1,76 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for netapp</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>expiry_thres_minutes=720</td>
<td>(IntOpt) Threshold minutes after which cache file can be cleaned.</td>
</tr>
<tr>
<td>netapp_login=None</td>
<td>(StrOpt) User name for the storage controller</td>
</tr>
<tr>
<td>netapp_password=None</td>
<td>(StrOpt) Password for the storage controller</td>
</tr>
<tr>
<td>netapp_server_hostname=None</td>
<td>(StrOpt) Host name for the storage controller</td>
</tr>
<tr>
<td>netapp_server_port=80</td>
<td>(IntOpt) Port number for the storage controller</td>
</tr>
<tr>
<td>netapp_size_multiplier=1.2</td>
<td>(FloatOpt) Volume size multiplier to ensure while creation</td>
</tr>
<tr>
<td>netapp_storage_family=ontap_cluster</td>
<td>(StrOpt) Storage family type.</td>
</tr>
<tr>
<td>netapp_storage_protocol=None</td>
<td>(StrOpt) Storage protocol type.</td>
</tr>
<tr>
<td>netapp_transport_type=http</td>
<td>(StrOpt) Transport type protocol</td>
</tr>
<tr>
<td>netapp_vfiler=None</td>
<td>(StrOpt) Vfiler to use for provisioning</td>
</tr>
<tr>
<td>netapp_volume_list=None</td>
<td>(StrOpt) Comma separated volumes to be used for provisioning</td>
</tr>
<tr>
<td>netapp_vserver=None</td>
<td>(StrOpt) Cluster vserver to use for provisioning</td>
</tr>
<tr>
<td>thres_avl_size_perc_start=20</td>
<td>(IntOpt) Threshold available percent to start cache cleaning.</td>
</tr>
<tr>
<td>thres_avl_size_perc_stop=60</td>
<td>(IntOpt) Threshold available percent to stop cache cleaning.</td>
</tr>
</tbody>
</table>
</para>
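
A hedged example of a NetApp backend stanza in cinder.conf (hostname and credentials are placeholders; the other values are the documented defaults):

netapp_server_hostname=filer.example.com   # placeholder
netapp_server_port=80
netapp_login=admin                          # placeholder
netapp_password=secret                      # placeholder
netapp_storage_family=ontap_cluster
netapp_transport_type=http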

View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for nfs</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>nfs_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details.</td>
</tr>
<tr>
<td>nfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for nfs shares.</td>
</tr>
<tr>
<td>nfs_oversub_ratio=1.0</td>
<td>(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid.</td>
</tr>
<tr>
<td>nfs_shares_config=/etc/cinder/nfs_shares</td>
<td>(StrOpt) File with the list of available nfs shares</td>
</tr>
<tr>
<td>nfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparsed files which take no space.If set to False volume is created as regular file.In such case volume creation takes a lot of time.</td>
</tr>
<tr>
<td>nfs_used_ratio=0.95</td>
<td>(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.</td>
</tr>
</tbody>
</table>
</para>
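
Illustrative only (the driver path is an assumption, not shown in this diff): an NFS backend in cinder.conf built from these options would look roughly like:

volume_driver=cinder.volume.drivers.nfs.NfsDriver   # assumed driver path
nfs_shares_config=/etc/cinder/nfs_shares
nfs_mount_point_base=$state_path/mnt
nfs_sparsed_volumes=True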

View File

@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for quota</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>max_age=0</td>
<td>(IntOpt) number of seconds between subsequent usage refreshes</td>
</tr>
<tr>
<td>quota_driver=cinder.quota.DbQuotaDriver</td>
<td>(StrOpt) default driver to use for quota checks</td>
</tr>
<tr>
<td>quota_gigabytes=1000</td>
<td>(IntOpt) number of volume gigabytes (snapshots are also included) allowed per project</td>
</tr>
<tr>
<td>quota_snapshots=10</td>
<td>(IntOpt) number of volume snapshots allowed per project</td>
</tr>
<tr>
<td>quota_volumes=10</td>
<td>(IntOpt) number of volumes allowed per project</td>
</tr>
<tr>
<td>reservation_expire=86400</td>
<td>(IntOpt) number of seconds until a reservation expires</td>
</tr>
<tr>
<td>use_default_quota_class=True</td>
<td>(BoolOpt) whether to use default quota class for default quota</td>
</tr>
</tbody>
</table>
</para>
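
As a quick illustration (the values are simply the documented defaults, not a recommendation), per-project quotas would be tuned in cinder.conf like so:

quota_volumes=10
quota_snapshots=10
quota_gigabytes=1000
quota_driver=cinder.quota.DbQuotaDriver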

View File

@ -16,7 +16,7 @@
</thead>
<tbody>
<tr>
<td>allowed_rpc_exception_modules=cinder.openstack.common.exception,nova.exception,cinder.exception,exceptions</td>
<td>allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions</td>
<td>(ListOpt) Modules of exceptions that are permitted to be recreatedupon receiving exception data from an rpc call.</td>
</tr>
<tr>
@ -151,14 +151,14 @@
<td>rabbit_retry_interval=1</td>
<td>(IntOpt) how frequently to retry connecting with RabbitMQ</td>
</tr>
<tr>
<td>rabbit_use_ssl=False</td>
<td>(BoolOpt) connect over SSL for RabbitMQ</td>
</tr>
<tr>
<td>rabbit_userid=guest</td>
<td>(StrOpt) the RabbitMQ userid</td>
</tr>
<tr>
<td>rabbit_use_ssl=False</td>
<td>(BoolOpt) connect over SSL for RabbitMQ</td>
</tr>
<tr>
<td>rabbit_virtual_host=/</td>
<td>(StrOpt) the RabbitMQ virtual host</td>
@ -192,7 +192,7 @@
<td>(IntOpt) Number of ZeroMQ contexts, defaults to 1</td>
</tr>
<tr>
<td>rpc_zmq_host=autodoc</td>
<td>rpc_zmq_host=docwork</td>
<td>(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.</td>
</tr>
<tr>
@ -211,6 +211,10 @@
<td>rpc_zmq_topic_backlog=None</td>
<td>(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.</td>
</tr>
<tr>
<td>volume_topic=cinder-volume</td>
<td>(StrOpt) the topic volume nodes listen on</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for san-solaris</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>san_zfs_volume_base=rpool/</td>
<td>(StrOpt) The ZFS path under which to create zvols for volumes.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,64 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for san</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>san_clustername=</td>
<td>(StrOpt) Cluster name to use for creating volumes</td>
</tr>
<tr>
<td>san_ip=</td>
<td>(StrOpt) IP address of SAN controller</td>
</tr>
<tr>
<td>san_is_local=False</td>
<td>(BoolOpt) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device</td>
</tr>
<tr>
<td>san_login=admin</td>
<td>(StrOpt) Username for SAN controller</td>
</tr>
<tr>
<td>san_password=</td>
<td>(StrOpt) Password for SAN controller</td>
</tr>
<tr>
<td>san_private_key=</td>
<td>(StrOpt) Filename of private key to use for SSH authentication</td>
</tr>
<tr>
<td>san_ssh_port=22</td>
<td>(IntOpt) SSH port to use with SAN</td>
</tr>
<tr>
<td>san_thin_provision=True</td>
<td>(BoolOpt) Use thin provisioning for SAN volumes?</td>
</tr>
<tr>
<td>ssh_conn_timeout=30</td>
<td>(IntOpt) SSH connection timeout in seconds</td>
</tr>
<tr>
<td>ssh_max_pool_conn=5</td>
<td>(IntOpt) Maximum ssh connections in the pool</td>
</tr>
<tr>
<td>ssh_min_pool_conn=1</td>
<td>(IntOpt) Minimum ssh connections in the pool</td>
</tr>
</tbody>
</table>
</para>
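
A minimal, hypothetical SAN-backed stanza in cinder.conf using these options (the address and password are placeholders):

san_ip=192.0.2.20             # placeholder controller address
san_login=admin
san_password=secret           # placeholder
san_ssh_port=22
san_thin_provision=True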

View File

@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for scality</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>scality_sofs_config=None</td>
<td>(StrOpt) Path or URL to Scality SOFS configuration file</td>
</tr>
<tr>
<td>scality_sofs_mount_point=$state_path/scality</td>
<td>(StrOpt) Base dir where Scality SOFS shall be mounted</td>
</tr>
<tr>
<td>scality_sofs_volume_dir=cinder/volumes</td>
<td>(StrOpt) Path from Scality SOFS root to volume dir</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for solidfire</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>sf_account_prefix=docwork</td>
<td>(StrOpt) Create SolidFire accounts with this prefix</td>
</tr>
<tr>
<td>sf_allow_tenant_qos=False</td>
<td>(BoolOpt) Allow tenants to specify QOS on create</td>
</tr>
<tr>
<td>sf_api_port=443</td>
<td>(IntOpt) SolidFire API port. Useful if the device api is behind a proxy on a different port.</td>
</tr>
<tr>
<td>sf_emulate_512=True</td>
<td>(BoolOpt) Set 512 byte emulation on volume creation; </td>
</tr>
</tbody>
</table>
</para>

View File

@ -15,10 +15,6 @@
</tr>
</thead>
<tbody>
<tr>
<td>available_devices=</td>
<td>(ListOpt) List of all available devices</td>
</tr>
<tr>
<td>backend=sqlalchemy</td>
<td>(StrOpt) The backend to use for db</td>
@ -31,110 +27,14 @@
<td>capacity_weight_multiplier=1.0</td>
<td>(FloatOpt) Multiplier used for weighing volume capacity. Negative numbers mean to stack vs spread.</td>
</tr>
<tr>
<td>cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml</td>
<td>(StrOpt) config data for cinder huawei plugin</td>
</tr>
<tr>
<td>coraid_esm_address=</td>
<td>(StrOpt) IP address of Coraid ESM</td>
</tr>
<tr>
<td>coraid_group=admin</td>
<td>(StrOpt) Name of group on Coraid ESM to which coraid_user belongs (must have admin privilege)</td>
</tr>
<tr>
<td>coraid_password=password</td>
<td>(StrOpt) Password to connect to Coraid ESM</td>
</tr>
<tr>
<td>coraid_repository_key=coraid_repository</td>
<td>(StrOpt) Volume Type key name to store ESM Repository Name</td>
</tr>
<tr>
<td>coraid_user=admin</td>
<td>(StrOpt) User name to connect to Coraid ESM</td>
</tr>
<tr>
<td>enabled_backends=None</td>
<td>(ListOpt) A list of backend names to use. These backend names should be backed by a unique [CONFIG] group with its options</td>
</tr>
<tr>
<td>eqlx_chap_login=admin</td>
<td>(StrOpt) Existing CHAP account name</td>
</tr>
<tr>
<td>eqlx_chap_password=password</td>
<td>(StrOpt) Password for specified CHAP account name</td>
</tr>
<tr>
<td>eqlx_cli_max_retries=5</td>
<td>(IntOpt) Maximum retry count for reconnection</td>
</tr>
<tr>
<td>eqlx_cli_timeout=30</td>
<td>(IntOpt) Timeout for the Group Manager cli command execution</td>
</tr>
<tr>
<td>eqlx_group_name=group-0</td>
<td>(StrOpt) Group name to use for creating volumes</td>
</tr>
<tr>
<td>eqlx_pool=default</td>
<td>(StrOpt) Pool in which volumes will be created</td>
</tr>
<tr>
<td>eqlx_use_chap=False</td>
<td>(BoolOpt) Use CHAP authentificaion for targets?</td>
</tr>
<tr>
<td>expiry_thres_minutes=720</td>
<td>(IntOpt) Threshold minutes after which cache file can be cleaned.</td>
</tr>
<tr>
<td>hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml</td>
<td>(StrOpt) configuration file for HDS cinder plugin for HUS</td>
</tr>
<tr>
<td>hp3par_api_url=</td>
<td>(StrOpt) 3PAR WSAPI Server Url like https://&lt;3par ip&gt;:8080/api/v1</td>
</tr>
<tr>
<td>hp3par_cpg=OpenStack</td>
<td>(StrOpt) The CPG to use for volume creation</td>
</tr>
<tr>
<td>hp3par_cpg_snap=</td>
<td>(StrOpt) The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used</td>
</tr>
<tr>
<td>hp3par_debug=False</td>
<td>(BoolOpt) Enable HTTP debugging to 3PAR</td>
</tr>
<tr>
<td>hp3par_domain=None</td>
<td>(StrOpt) This option is DEPRECATED and no longer used. The 3par domain name to use.</td>
</tr>
<tr>
<td>hp3par_iscsi_ips=</td>
<td>(ListOpt) List of target iSCSI addresses to use.</td>
</tr>
<tr>
<td>hp3par_password=</td>
<td>(StrOpt) 3PAR Super user password</td>
</tr>
<tr>
<td>hp3par_snapshot_expiration=</td>
<td>(StrOpt) The time in hours when a snapshot expires and is deleted. This must be larger than expiration</td>
</tr>
<tr>
<td>hp3par_snapshot_retention=</td>
<td>(StrOpt) The time in hours to retain a snapshot. You can't delete it before this expires.</td>
</tr>
<tr>
<td>hp3par_username=</td>
<td>(StrOpt) 3PAR Super user username</td>
</tr>
<tr>
<td>iscsi_helper=tgtadm</td>
<td>(StrOpt) iscsi target user-land tool to use</td>
@ -179,22 +79,6 @@
<td>iser_target_prefix=iqn.2010-10.org.iser.openstack:</td>
<td>(StrOpt) prefix for iser volumes</td>
</tr>
<tr>
<td>keymgr_api_class=cinder.keymgr.not_implemented_key_mgr.NotImplementedKeyManager</td>
<td>(StrOpt) The full class name of the key manager API class</td>
</tr>
<tr>
<td>lvm_mirrors=0</td>
<td>(IntOpt) If set, create lvms with multiple mirrors. Note that this requires lvm_mirrors + 2 pvs with available space</td>
</tr>
<tr>
<td>lvm_type=default</td>
<td>(StrOpt) Type of LVM volumes to deploy; (default or thin)</td>
</tr>
<tr>
<td>max_age=0</td>
<td>(IntOpt) number of seconds between subsequent usage refreshes</td>
</tr>
<tr>
<td>max_gigabytes=10000</td>
<td>(IntOpt) maximum number of volume gigabytes to allow per host</td>
@ -223,182 +107,26 @@
<td>min_pool_size=1</td>
<td>(IntOpt) Minimum number of SQL connections to keep open in a pool</td>
</tr>
<tr>
<td>netapp_password=None</td>
<td>(StrOpt) Password for the storage controller</td>
</tr>
<tr>
<td>netapp_server_hostname=None</td>
<td>(StrOpt) Host name for the storage controller</td>
</tr>
<tr>
<td>netapp_server_port=80</td>
<td>(IntOpt) Port number for the storage controller</td>
</tr>
<tr>
<td>netapp_size_multiplier=1.2</td>
<td>(FloatOpt) Volume size multiplier to ensure while creation</td>
</tr>
<tr>
<td>netapp_storage_family=ontap_cluster</td>
<td>(StrOpt) Storage family type.</td>
</tr>
<tr>
<td>netapp_storage_protocol=None</td>
<td>(StrOpt) Storage protocol type.</td>
</tr>
<tr>
<td>netapp_transport_type=http</td>
<td>(StrOpt) Transport type protocol</td>
</tr>
<tr>
<td>netapp_volume_list=None</td>
<td>(StrOpt) Comma separated volumes to be used for provisioning</td>
</tr>
<tr>
<td>netapp_vserver=None</td>
<td>(StrOpt) Cluster vserver to use for provisioning</td>
</tr>
<tr>
<td>num_iser_scan_tries=3</td>
<td>(IntOpt) The maximum number of times to rescan iSER targetto find volume</td>
</tr>
<tr>
<td>num_volume_device_scan_tries=3</td>
<td>(IntOpt) The maximum number of times to rescan targetsto find volume</td>
</tr>
<tr>
<td>san_clustername=</td>
<td>(StrOpt) Cluster name to use for creating volumes</td>
</tr>
<tr>
<td>san_ip=</td>
<td>(StrOpt) IP address of SAN controller</td>
</tr>
<tr>
<td>san_is_local=False</td>
<td>(BoolOpt) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device</td>
</tr>
<tr>
<td>san_login=admin</td>
<td>(StrOpt) Username for SAN controller</td>
</tr>
<tr>
<td>san_password=</td>
<td>(StrOpt) Password for SAN controller</td>
</tr>
<tr>
<td>san_private_key=</td>
<td>(StrOpt) Filename of private key to use for SSH authentication</td>
</tr>
<tr>
<td>san_ssh_port=22</td>
<td>(IntOpt) SSH port to use with SAN</td>
</tr>
<tr>
<td>san_thin_provision=True</td>
<td>(BoolOpt) Use thin provisioning for SAN volumes?</td>
</tr>
<tr>
<td>san_zfs_volume_base=rpool/</td>
<td>(StrOpt) The ZFS path under which to create zvols for volumes.</td>
</tr>
<tr>
<td>scality_sofs_config=None</td>
<td>(StrOpt) Path or URL to Scality SOFS configuration file</td>
</tr>
<tr>
<td>scality_sofs_mount_point=$state_path/scality</td>
<td>(StrOpt) Base dir where Scality SOFS shall be mounted</td>
</tr>
<tr>
<td>scality_sofs_volume_dir=cinder/volumes</td>
<td>(StrOpt) Path from Scality SOFS root to volume dir</td>
</tr>
<tr>
<td>sf_account_prefix=autodoc</td>
<td>(StrOpt) Create SolidFire accounts with this prefix</td>
</tr>
<tr>
<td>sf_allow_tenant_qos=False</td>
<td>(BoolOpt) Allow tenants to specify QOS on create</td>
</tr>
<tr>
<td>sf_api_port=443</td>
<td>(IntOpt) SolidFire API port. Useful if the device api is behind a proxy on a different port.</td>
</tr>
<tr>
<td>sf_emulate_512=True</td>
<td>(BoolOpt) Set 512 byte emulation on volume creation; </td>
</tr>
<tr>
<td>storwize_svc_connection_protocol=iSCSI</td>
<td>(StrOpt) Connection protocol (iSCSI/FC)</td>
</tr>
<tr>
<td>storwize_svc_flashcopy_timeout=120</td>
<td>(IntOpt) Maximum number of seconds to wait for FlashCopy to be prepared. Maximum value is 600 seconds (10 minutes)</td>
</tr>
<tr>
<td>storwize_svc_multihostmap_enabled=True</td>
<td>(BoolOpt) Allows vdisk to multi host mapping</td>
</tr>
<tr>
<td>storwize_svc_multipath_enabled=False</td>
<td>(BoolOpt) Connect with multipath (FC only; iSCSI multipath is controlled by Nova)</td>
</tr>
<tr>
<td>storwize_svc_vol_autoexpand=True</td>
<td>(BoolOpt) Storage system autoexpand parameter for volumes (True/False)</td>
</tr>
<tr>
<td>storwize_svc_vol_compression=False</td>
<td>(BoolOpt) Storage system compression option for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_easytier=True</td>
<td>(BoolOpt) Enable Easy Tier for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_grainsize=256</td>
<td>(IntOpt) Storage system grain size parameter for volumes (32/64/128/256)</td>
</tr>
<tr>
<td>storwize_svc_vol_iogrp=0</td>
<td>(IntOpt) The I/O group in which to allocate volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_rsize=2</td>
<td>(IntOpt) Storage system space-efficiency parameter for volumes (percentage)</td>
</tr>
<tr>
<td>storwize_svc_vol_warning=0</td>
<td>(IntOpt) Storage system threshold for volume capacity warnings (percentage)</td>
</tr>
<tr>
<td>storwize_svc_volpool_name=volpool</td>
<td>(StrOpt) Storage system storage pool for volumes</td>
</tr>
<tr>
<td>thres_avl_size_perc_start=20</td>
<td>(IntOpt) Threshold available percent to start cache cleaning.</td>
</tr>
<tr>
<td>thres_avl_size_perc_stop=60</td>
<td>(IntOpt) Threshold available percent to stop cache cleaning.</td>
<td>(IntOpt) The maximum number of times to rescan targets to find volume</td>
</tr>
<tr>
<td>volume_backend_name=None</td>
<td>(StrOpt) The backend name for a given driver implementation</td>
</tr>
<tr>
<td>volume_clear=zero</td>
<td>(StrOpt) Method used to wipe old voumes (valid options are: none, zero, shred)</td>
</tr>
<tr>
<td>volume_clear_size=0</td>
<td>(IntOpt) Size in MiB to wipe at start of old volumes. 0 =&gt; all</td>
</tr>
<tr>
<td>volume_clear=zero</td>
<td>(StrOpt) Method used to wipe old voumes (valid options are: none, zero, shred)</td>
</tr>
<tr>
<td>volume_dd_blocksize=1M</td>
<td>(StrOpt) The default block size used when copying/clearing volumes</td>
@ -407,102 +135,22 @@
<td>volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver</td>
<td>(StrOpt) Driver to use for volume creation</td>
</tr>
<tr>
<td>volume_group=cinder-volumes</td>
<td>(StrOpt) Name for the VG that will contain exported volumes</td>
</tr>
<tr>
<td>volume_manager=cinder.volume.manager.VolumeManager</td>
<td>(StrOpt) full class name for the Manager for volume</td>
</tr>
<tr>
<td>volume_name_template=volume-%s</td>
<td>(StrOpt) Template string to be used to generate volume names</td>
</tr>
<tr>
<td>volume_topic=cinder-volume</td>
<td>(StrOpt) the topic volume nodes listen on</td>
</tr>
<tr>
<td>volume_transfer_key_length=16</td>
<td>(IntOpt) The number of characters in the autogenerated auth key.</td>
</tr>
<tr>
<td>volume_transfer_salt_length=8</td>
<td>(IntOpt) The number of characters in the salt.</td>
</tr>
<tr>
<td>volume_usage_audit_period=month</td>
<td>(StrOpt) time period to generate volume usages for. Time period must be hour, day, month or year</td>
</tr>
<tr>
<td>volumes_dir=$state_path/volumes</td>
<td>(StrOpt) Volume configuration file storage directory</td>
</tr>
<tr>
<td>windows_iscsi_lun_path=C:\iSCSIVirtualDisks</td>
<td>(StrOpt) Path to store VHD backed volumes</td>
</tr>
<tr>
<td>xiv_ds8k_connection_type=iscsi</td>
<td>(StrOpt) Connection type to the IBM Storage Array (fibre_channel|iscsi)</td>
</tr>
<tr>
<td>xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy</td>
<td>(StrOpt) Proxy driver that connects to the IBM Storage Array</td>
<td>volume_usage_audit_period=month</td>
<td>(StrOpt) time period to generate volume usages for. Time period must be hour, day, month or year</td>
</tr>
<tr>
<td>zadara_default_stripesize=64</td>
<td>(StrOpt) Default stripe size for volumes</td>
</tr>
<tr>
<td>zadara_default_striping_mode=simple</td>
<td>(StrOpt) Default striping mode for volumes</td>
</tr>
<tr>
<td>zadara_password=None</td>
<td>(StrOpt) Password for the VPSA</td>
</tr>
<tr>
<td>zadara_user=None</td>
<td>(StrOpt) User name for the VPSA</td>
</tr>
<tr>
<td>zadara_vol_encrypt=False</td>
<td>(BoolOpt) Default encryption policy for volumes</td>
</tr>
<tr>
<td>zadara_vol_name_template=OS_%s</td>
<td>(StrOpt) Default template for VPSA volume names</td>
</tr>
<tr>
<td>zadara_vol_thin=True</td>
<td>(BoolOpt) Default thin provisioning policy for volumes</td>
</tr>
<tr>
<td>zadara_vpsa_allow_nonexistent_delete=True</td>
<td>(BoolOpt) Don't halt on deletion of non-existing volumes</td>
</tr>
<tr>
<td>zadara_vpsa_auto_detach_on_delete=True</td>
<td>(BoolOpt) Automatically detach from servers on volume delete</td>
</tr>
<tr>
<td>zadara_vpsa_ip=None</td>
<td>(StrOpt) Management IP of Zadara VPSA</td>
</tr>
<tr>
<td>zadara_vpsa_poolname=None</td>
<td>(StrOpt) Name of VPSA storage pool for volumes</td>
</tr>
<tr>
<td>zadara_vpsa_port=None</td>
<td>(StrOpt) Zadara VPSA port number</td>
</tr>
<tr>
<td>zadara_vpsa_use_ssl=False</td>
<td>(BoolOpt) Use SSL connection</td>
</tr>
</tbody>
</table>
</para>

View File

@ -21,7 +21,7 @@
</tr>
<tr>
<td>glusterfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for gluster shares</td>
<td>(StrOpt) Base dir containing mount points for gluster shares.</td>
</tr>
<tr>
<td>glusterfs_qcow2_volumes=False</td>

View File

@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage_gpfs</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>gpfs_images_dir=None</td>
<td>(StrOpt) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS.</td>
</tr>
<tr>
<td>gpfs_images_share_mode=None</td>
<td>(StrOpt) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently.</td>
</tr>
<tr>
<td>gpfs_max_clone_depth=0</td>
<td>(IntOpt) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth.</td>
</tr>
<tr>
<td>gpfs_mount_point_base=None</td>
<td>(StrOpt) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored.</td>
</tr>
<tr>
<td>gpfs_sparse_volumes=True</td>
<td>(BoolOpt) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time.</td>
</tr>
</tbody>
</table>
</para>
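
Sketch of a GPFS backend in cinder.conf (the driver path and mount point are assumptions for illustration; the remaining values are the documented defaults):

volume_driver=cinder.volume.drivers.gpfs.GPFSDriver   # assumed driver path
gpfs_mount_point_base=/gpfs/cinder                     # placeholder mount point
gpfs_sparse_volumes=True
gpfs_max_clone_depth=0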

View File

@ -17,11 +17,11 @@
<tbody>
<tr>
<td>nfs_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details.</td>
</tr>
<tr>
<td>nfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for nfs shares</td>
<td>(StrOpt) Base dir containing mount points for nfs shares.</td>
</tr>
<tr>
<td>nfs_oversub_ratio=1.0</td>

View File

@ -0,0 +1,72 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storwize</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>storwize_svc_connection_protocol=iSCSI</td>
<td>(StrOpt) Connection protocol (iSCSI/FC)</td>
</tr>
<tr>
<td>storwize_svc_flashcopy_timeout=120</td>
<td>(IntOpt) Maximum number of seconds to wait for FlashCopy to be prepared. Maximum value is 600 seconds (10 minutes)</td>
</tr>
<tr>
<td>storwize_svc_iscsi_chap_enabled=True</td>
<td>(BoolOpt) Configure CHAP authentication for iSCSI connections (Default: Enabled)</td>
</tr>
<tr>
<td>storwize_svc_multihostmap_enabled=True</td>
<td>(BoolOpt) Allows vdisk to multi host mapping</td>
</tr>
<tr>
<td>storwize_svc_multipath_enabled=False</td>
<td>(BoolOpt) Connect with multipath (FC only; iSCSI multipath is controlled by Nova)</td>
</tr>
<tr>
<td>storwize_svc_vol_autoexpand=True</td>
<td>(BoolOpt) Storage system autoexpand parameter for volumes (True/False)</td>
</tr>
<tr>
<td>storwize_svc_vol_compression=False</td>
<td>(BoolOpt) Storage system compression option for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_easytier=True</td>
<td>(BoolOpt) Enable Easy Tier for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_grainsize=256</td>
<td>(IntOpt) Storage system grain size parameter for volumes (32/64/128/256)</td>
</tr>
<tr>
<td>storwize_svc_vol_iogrp=0</td>
<td>(IntOpt) The I/O group in which to allocate volumes</td>
</tr>
<tr>
<td>storwize_svc_volpool_name=volpool</td>
<td>(StrOpt) Storage system storage pool for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_rsize=2</td>
<td>(IntOpt) Storage system space-efficiency parameter for volumes (percentage)</td>
</tr>
<tr>
<td>storwize_svc_vol_warning=0</td>
<td>(IntOpt) Storage system threshold for volume capacity warnings (percentage)</td>
</tr>
</tbody>
</table>
</para>
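
Hypothetical Storwize/SVC excerpt for cinder.conf (the driver path, address, and login are assumptions; the storwize_svc_* values are the documented defaults):

volume_driver=cinder.volume.drivers.storwize_svc.StorwizeSVCDriver   # assumed driver path
san_ip=192.0.2.30             # placeholder
san_login=superuser           # placeholder
storwize_svc_volpool_name=volpool
storwize_svc_connection_protocol=iSCSI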

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for windows</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>windows_iscsi_lun_path=C:\iSCSIVirtualDisks</td>
<td>(StrOpt) Path to store VHD backed volumes</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for xiv</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>xiv_ds8k_connection_type=iscsi</td>
<td>(StrOpt) Connection type to the IBM Storage Array (fibre_channel|iscsi)</td>
</tr>
<tr>
<td>xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy</td>
<td>(StrOpt) Proxy driver that connects to the IBM Storage Array</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,68 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for zadara</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>zadara_default_striping_mode=simple</td>
<td>(StrOpt) Default striping mode for volumes</td>
</tr>
<tr>
<td>zadara_password=None</td>
<td>(StrOpt) Password for the VPSA</td>
</tr>
<tr>
<td>zadara_user=None</td>
<td>(StrOpt) User name for the VPSA</td>
</tr>
<tr>
<td>zadara_vol_encrypt=False</td>
<td>(BoolOpt) Default encryption policy for volumes</td>
</tr>
<tr>
<td>zadara_vol_name_template=OS_%s</td>
<td>(StrOpt) Default template for VPSA volume names</td>
</tr>
<tr>
<td>zadara_vol_thin=True</td>
<td>(BoolOpt) Default thin provisioning policy for volumes</td>
</tr>
<tr>
<td>zadara_vpsa_allow_nonexistent_delete=True</td>
<td>(BoolOpt) Don't halt on deletion of non-existing volumes</td>
</tr>
<tr>
<td>zadara_vpsa_auto_detach_on_delete=True</td>
<td>(BoolOpt) Automatically detach from servers on volume delete</td>
</tr>
<tr>
<td>zadara_vpsa_ip=None</td>
<td>(StrOpt) Management IP of Zadara VPSA</td>
</tr>
<tr>
<td>zadara_vpsa_poolname=None</td>
<td>(StrOpt) Name of VPSA storage pool for volumes</td>
</tr>
<tr>
<td>zadara_vpsa_port=None</td>
<td>(StrOpt) Zadara VPSA port number</td>
</tr>
<tr>
<td>zadara_vpsa_use_ssl=False</td>
<td>(BoolOpt) Use SSL connection</td>
</tr>
</tbody>
</table>
</para>

View File

@ -76,6 +76,7 @@
okay to run manage multiple HUS arrays using multiple cinder
instances (or servers)</para></footnote>
</para></listitem></itemizedlist></note>
<xi:include href="../../../common/tables/cinder-hds.xml"/>
<simplesect>
<title>Single Backend</title>
<para>

View File

@ -152,6 +152,7 @@
volume_driver = cinder.volume.drivers.huawei.huawei_iscsi.HuaweiISCSIDriver
cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml
</programlisting>
<xi:include href="../../../common/tables/cinder-huawei.xml"/>
</simplesect>
<simplesect>
<title>Configuration File Details</title>

View File

@ -48,7 +48,7 @@
<para>The following table contains the configuration options
supported by the GPFS driver.</para>
<xi:include
href="../../../common/tables/cinder-gpfs_volume.xml"/>
href="../../../common/tables/cinder-storage_gpfs.xml"/>
<note>
<para>The <literal>gpfs_images_share_mode</literal>
flag is only valid if the Image service is configured to

View File

@ -487,6 +487,7 @@
</tr>
</tbody>
</table>
<xi:include href="../../../common/tables/cinder-storwize.xml"/>
</simplesect>
<simplesect>
<title>Placement with volume types</title>

View File

@ -18,4 +18,5 @@
san_login=sfadmin # your cluster admin login
san_password=sfpassword # your cluster admin password
</programlisting>
<xi:include href="../../../common/tables/cinder-solidfire.xml"/>
</section>

View File

@ -0,0 +1,14 @@
<section xml:id="windows-volume-driver"
xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0">
<title>Windows</title>
<para>There is a volume backend for Windows. Set the following in your
<filename>cinder.conf</filename>, and use the options below to configure it.
</para>
<programlisting language="ini">
volume_driver=cinder.volume.drivers.windows.WindowsDriver
</programlisting>
<xi:include href="../../../common/tables/cinder-windows.xml"/>
</section>

View File

@ -0,0 +1,14 @@
<section xml:id="zadara-volume-driver"
xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0">
<title>Zadara</title>
<para>There is a volume backend for Zadara. Set the following in your
<filename>cinder.conf</filename>, and use the options below to configure it.
</para>
<programlisting language="ini">
volume_driver=cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver
</programlisting>
<xi:include href="../../../common/tables/cinder-zadara.xml"/>
</section>

View File

@ -27,6 +27,8 @@ iscsi_helper=tgtadm</programlisting>
<xi:include href="drivers/nexenta-volume-driver.xml"/>
<xi:include href="drivers/nfs-volume-driver.xml"/>
<xi:include href="drivers/solidfire-volume-driver.xml"/>
<xi:include href="drivers/windows-volume-driver.xml"/>
<xi:include href="drivers/xenapi-nfs.xml"/>
<xi:include href="drivers/xen-sm-driver.xml"/>
<xi:include href="drivers/zadara-volume-driver.xml"/>
</section>

View File

@ -8,7 +8,6 @@
xmlns:ns3="http://www.w3.org/1998/Math/MathML"
xmlns:ns="http://docbook.org/ns/docbook">
<title>OpenStack Block Storage</title>
<?dbhtml stop-chunking?>
<para>The Block Storage project works with many different storage drivers. You can
configure those following the instructions.</para>
<xi:include href="block-storage/section_block-storage-overview.xml"/>

View File

@ -3,10 +3,11 @@ allowed_rpc_exception_modules rpc
amqp_auto_delete rpc
amqp_durable_queues rpc
amqp_rpc_single_reply_queue rpc
api_class keymgr
api_paste_config api
api_rate_limit api
auth_strategy auth
available_devices storage
available_devices block-device
backdoor_port api
backend storage
backlog log
@ -17,7 +18,7 @@ backup_ceph_pool backups_ceph
backup_ceph_stripe_count backups_ceph
backup_ceph_stripe_unit backups_ceph
backup_ceph_user backups_ceph
backup_compression_algorithm backups_swift
backup_compression_algorithm backups
backup_driver backups
backup_manager backups
backup_name_template backups
@ -35,7 +36,7 @@ backup_tsm_password backups_tsm
backup_tsm_volume_prefix backups_tsm
bindir storage
capacity_weight_multiplier storage
cinder_huawei_conf_file storage
cinder_huawei_conf_file huawei
cloned_volume_same_az zones
compute_api_class common
connection connection
@ -44,10 +45,10 @@ connection_trace connection
connection_type connection
control_exchange rpc
coraid_esm_address storage
coraid_group storage
coraid_password storage
coraid_repository_key storage
coraid_user storage
coraid_group coraid
coraid_password coraid
coraid_repository_key coraid
coraid_user coraid
db_backend database
db_driver database
debug common
@ -57,21 +58,22 @@ default_notification_level common
default_publisher_id common
default_volume_type common
disable_process_locking common
enabled_backends storage
enable_new_services common
enable_v1_api api
enable_v2_api api
enabled_backends storage
eqlx_chap_login storage
eqlx_chap_password storage
eqlx_cli_max_retries storage
eqlx_cli_timeout storage
eqlx_group_name storage
eqlx_pool storage
eqlx_use_chap storage
expiry_thres_minutes storage
eqlx_chap_login eqlx
eqlx_chap_password eqlx
eqlx_cli_max_retries eqlx
eqlx_cli_timeout eqlx
eqlx_group_name eqlx
eqlx_pool eqlx
eqlx_use_chap eqlx
expiry_thres_minutes netapp
fake_rabbit rpc
fatal_deprecations common
fatal_exception_format_errors common
fixed_key keymgr
glance_api_insecure images
glance_api_servers images
glance_api_ssl_compression images
@ -85,24 +87,24 @@ glusterfs_mount_point_base storage_glusterfs
glusterfs_qcow2_volumes storage_glusterfs
glusterfs_shares_config storage_glusterfs
glusterfs_sparsed_volumes storage_glusterfs
gpfs_images_dir gpfs_volume
gpfs_images_share_mode gpfs_volume
gpfs_max_clone_depth gpfs_volume
gpfs_mount_point_base gpfs_volume
gpfs_sparse_volumes gpfs_volume
hds_cinder_config_file storage
gpfs_images_dir storage_gpfs
gpfs_images_share_mode storage_gpfs
gpfs_max_clone_depth storage_gpfs
gpfs_mount_point_base storage_gpfs
gpfs_sparse_volumes storage_gpfs
hds_cinder_config_file hds
host common
host common
hp3par_api_url storage
hp3par_cpg storage
hp3par_cpg_snap storage
hp3par_debug storage
hp3par_domain storage
hp3par_iscsi_ips storage
hp3par_password storage
hp3par_snapshot_expiration storage
hp3par_snapshot_retention storage
hp3par_username storage
hp3par_api_url hp3par
hp3par_cpg hp3par
hp3par_cpg_snap hp3par
hp3par_debug hp3par
hp3par_domain hp3par
hp3par_iscsi_ips hp3par
hp3par_password hp3par
hp3par_snapshot_expiration hp3par
hp3par_snapshot_retention hp3par
hp3par_username hp3par
idle_timeout common
iet_conf common
image_conversion_dir images
@ -114,12 +116,11 @@ iscsi_ip_address storage
iscsi_num_targets storage
iscsi_port storage
iscsi_target_prefix storage
iser_helper storage
iser_helper storage
iser_ip_address storage
iser_num_targets storage
iser_port storage
iser_target_prefix storage
keymgr_api_class storage
kombu_ssl_ca_certs rpc
kombu_ssl_certfile rpc
kombu_ssl_keyfile rpc
@ -135,12 +136,12 @@ logging_context_format_string common
logging_debug_format_suffix common
logging_default_format_string common
logging_exception_prefix common
lvm_mirrors storage
lvm_type storage
lvm_mirrors lvm
lvm_type lvm
matchmaker_heartbeat_freq rpc
matchmaker_heartbeat_ttl rpc
matchmaker_ringfile rpc
max_age storage
max_age quota
max_gigabytes storage
max_overflow storage
max_pool_size storage
@ -151,35 +152,35 @@ min_pool_size storage
monkey_patch common
monkey_patch_modules common
my_ip common
netapp_login storage_netapp
netapp_password storage
netapp_server_hostname storage
netapp_server_port storage
netapp_size_multiplier storage
netapp_storage_family storage
netapp_storage_protocol storage
netapp_transport_type storage
netapp_vfiler storage_netapp
netapp_volume_list storage
netapp_vserver storage
nexenta_blocksize storage_nexenta_iscsi
nexenta_host storage_nexenta_iscsi
nexenta_iscsi_target_portal_port storage_nexenta_iscsi
nexenta_mount_options storage_nexenta_nfs
nexenta_mount_point_base storage_nexenta_nfs
nexenta_oversub_ratio storage_nexenta_nfs
nexenta_password storage_nexenta_iscsi
nexenta_rest_port storage_nexenta_iscsi
nexenta_rest_protocol storage_nexenta_iscsi
nexenta_shares_config storage_nexenta_nfs
nexenta_sparse storage_nexenta_iscsi
nexenta_sparsed_volumes storage_nexenta_nfs
nexenta_target_group_prefix storage_nexenta_iscsi
nexenta_target_prefix storage_nexenta_iscsi
nexenta_used_ratio storage_nexenta_nfs
nexenta_user storage_nexenta_iscsi
nexenta_volume storage_nexenta_iscsi
nexenta_volume_compression storage_nexenta_nfs
netapp_login netapp
netapp_password netapp
netapp_server_hostname netapp
netapp_server_port netapp
netapp_size_multiplier netapp
netapp_storage_family netapp
netapp_storage_protocol netapp
netapp_transport_type netapp
netapp_vfiler netapp
netapp_volume_list netapp
netapp_vserver netapp
nexenta_blocksize nexenta_iscsi
nexenta_host nexenta_iscsi
nexenta_iscsi_target_portal_port nexenta_iscsi
nexenta_mount_options nexenta_nfs
nexenta_mount_point_base nexenta_nfs
nexenta_oversub_ratio nexenta_nfs
nexenta_password nexenta_iscsi
nexenta_rest_port nexenta_iscsi
nexenta_rest_protocol nexenta_iscsi
nexenta_shares_config nexenta_nfs
nexenta_sparsed_volumes nexenta_iscsi
nexenta_sparse nexenta_iscsi
nexenta_target_group_prefix nexenta_iscsi
nexenta_target_prefix nexenta_iscsi
nexenta_used_ratio nexenta_nfs
nexenta_user nexenta_iscsi
nexenta_volume_compression nexenta_nfs
nexenta_volume nexenta_iscsi
nfs_mount_options storage_nfs
nfs_mount_point_base storage_nfs
nfs_oversub_ratio storage_nfs
@ -189,25 +190,25 @@ nfs_used_ratio storage_nfs
no_snapshot_gb_quota common
notification_driver rpc
notification_topics rpc
nova_api_insecure common
nova_ca_certificates_file common
nova_catalog_admin_info common
nova_catalog_info common
nova_endpoint_admin_template common
nova_endpoint_template common
nova_api_insecure compute
nova_ca_certificates_file compute
nova_catalog_admin_info compute
nova_catalog_info compute
nova_endpoint_admin_template compute
nova_endpoint_template compute
num_iser_scan_tries storage
num_shell_tries common
num_volume_device_scan_tries storage
os_region_name common
osapi_max_limit api
osapi_max_request_body_size api
osapi_volume_base_URL api
osapi_volume_ext_list api
osapi_volume_extension api
osapi_volume_ext_list api
os_region_name compute
password common
policy_default_rule common
policy_file common
pool_size common
pool_size lvm
port common
publish_errors rpc
pybasedir common
@ -221,10 +222,10 @@ qpid_sasl_mechanisms rpc
qpid_tcp_nodelay rpc
qpid_topology_version rpc
qpid_username rpc
quota_driver common
quota_gigabytes common
quota_snapshots common
quota_volumes common
quota_driver quota
quota_gigabytes quota
quota_snapshots quota
quota_volumes quota
rabbit_ha_queues rpc
rabbit_host rpc
rabbit_hosts rpc
@ -233,8 +234,8 @@ rabbit_password rpc
rabbit_port rpc
rabbit_retry_backoff rpc
rabbit_retry_interval rpc
rabbit_use_ssl rpc
rabbit_userid rpc
rabbit_use_ssl rpc
rabbit_virtual_host rpc
rbd_ceph_conf storage_ceph
rbd_flatten_volume_from_snapshot storage_ceph
@ -242,9 +243,9 @@ rbd_max_clone_depth storage_ceph
rbd_pool storage_ceph
rbd_secret_uuid storage_ceph
rbd_user storage_ceph
reservation_expire common
reservation_expire quota
reserved_percentage common
restore_discard_excess_bytes backups
restore_discard_excess_bytes backups_ceph
retry_interval common
root_helper common
rootwrap_config common
@ -261,18 +262,18 @@ rpc_zmq_matchmaker rpc
rpc_zmq_port rpc
rpc_zmq_topic_backlog rpc
run_external_periodic_tasks common
san_clustername storage
san_ip storage
san_is_local storage
san_login storage
san_password storage
san_private_key storage
san_ssh_port storage
san_thin_provision storage
san_zfs_volume_base storage
scality_sofs_config storage
scality_sofs_mount_point storage
scality_sofs_volume_dir storage
san_clustername san
san_ip san
san_is_local san
san_login san
san_password san
san_private_key san
san_ssh_port san
san_thin_provision san
san_zfs_volume_base san-solaris
scality_sofs_config scality
scality_sofs_mount_point scality
scality_sofs_volume_dir scality
scheduler_default_filters scheduler
scheduler_default_weighers scheduler
scheduler_driver scheduler
@ -282,42 +283,43 @@ scheduler_manager scheduler
scheduler_max_attempts scheduler
scheduler_topic scheduler
service_down_time common
sf_account_prefix storage
sf_allow_tenant_qos storage
sf_api_port storage
sf_emulate_512 storage
snapshot_name_template backup
snapshot_same_host backup
sf_account_prefix solidfire
sf_allow_tenant_qos solidfire
sf_api_port solidfire
sf_emulate_512 solidfire
snapshot_name_template backups
snapshot_same_host backups
sqlite_db common
sqlite_synchronous common
ssh_conn_timeout common
ssh_max_pool_conn common
ssh_min_pool_conn common
ssh_conn_timeout san
ssh_max_pool_conn san
ssh_min_pool_conn san
ssl_ca_file common
ssl_cert_file common
ssl_key_file common
state_path common
storage_availability_zone common
storwize_svc_connection_protocol storage
storwize_svc_flashcopy_timeout storage
storwize_svc_multihostmap_enabled storage
storwize_svc_multipath_enabled storage
storwize_svc_vol_autoexpand storage
storwize_svc_vol_compression storage
storwize_svc_vol_easytier storage
storwize_svc_vol_grainsize storage
storwize_svc_vol_iogrp storage
storwize_svc_vol_rsize storage
storwize_svc_vol_warning storage
storwize_svc_volpool_name storage
storwize_svc_connection_protocol storwize
storwize_svc_flashcopy_timeout storwize
storwize_svc_iscsi_chap_enabled storwize
storwize_svc_multihostmap_enabled storwize
storwize_svc_multipath_enabled storwize
storwize_svc_vol_autoexpand storwize
storwize_svc_vol_compression storwize
storwize_svc_vol_easytier storwize
storwize_svc_vol_grainsize storwize
storwize_svc_vol_iogrp storwize
storwize_svc_volpool_name storwize
storwize_svc_vol_rsize storwize
storwize_svc_vol_warning storwize
syslog_log_facility common
tcp_keepidle common
thres_avl_size_perc_start storage
thres_avl_size_perc_stop storage
thres_avl_size_perc_start netapp
thres_avl_size_perc_stop netapp
topics common
transfer_api_class api
until_refresh common
use_default_quota_class common
use_default_quota_class quota
use_forwarded_for common
use_multipath_for_image_xfer images
use_stderr common
@ -326,38 +328,38 @@ use_tpool common
verbose common
volume_api_class api
volume_backend_name storage
volume_clear storage
volume_clear_size storage
volume_clear storage
volume_dd_blocksize storage
volume_driver storage
volume_group storage
volume_group lvm
volume_manager storage
volume_name_template storage
volume_tmp_dir storage_ceph
volume_topic storage
volume_transfer_key_length storage
volume_transfer_salt_length storage
volume_usage_audit_period storage
volume_name_template api
volumes_dir storage
windows_iscsi_lun_path storage
volume_tmp_dir storage_ceph
volume_topic rpc
volume_transfer_key_length api
volume_transfer_salt_length api
volume_usage_audit_period storage
windows_iscsi_lun_path windows
xenapi_connection_password storage_xen
xenapi_connection_url storage_xen
xenapi_connection_username storage_xen
xenapi_nfs_server storage_xen
xenapi_nfs_serverpath storage_xen
xenapi_sr_base_path storage_xen
xiv_ds8k_connection_type storage
xiv_ds8k_proxy storage
xiv_ds8k_connection_type xiv
xiv_ds8k_proxy xiv
zadara_default_stripesize storage
zadara_default_striping_mode storage
zadara_password storage
zadara_user storage
zadara_vol_encrypt storage
zadara_vol_name_template storage
zadara_vol_thin storage
zadara_vpsa_allow_nonexistent_delete storage
zadara_vpsa_auto_detach_on_delete storage
zadara_vpsa_ip storage
zadara_vpsa_poolname storage
zadara_vpsa_port storage
zadara_vpsa_use_ssl storage
zadara_default_striping_mode zadara
zadara_password zadara
zadara_user zadara
zadara_vol_encrypt zadara
zadara_vol_name_template zadara
zadara_vol_thin zadara
zadara_vpsa_allow_nonexistent_delete zadara
zadara_vpsa_auto_detach_on_delete zadara
zadara_vpsa_ip zadara
zadara_vpsa_poolname zadara
zadara_vpsa_port zadara
zadara_vpsa_use_ssl zadara
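
For context, each line in the flagmappings file above pairs a configuration option with the documentation category that decides which generated "Description of configuration options for ..." table it lands in. The snippet below is a minimal, illustrative sketch of how such a file could be grouped by category; it is not the actual autodoc tool from the tools directory of this repository, and the input file name is an assumption.

    # Illustrative sketch only: group flagmapping entries (option -> category)
    # so each category can be rendered as its own configuration-option table.
    # The real generator lives in the tools directory of this repository;
    # the 'cinder.flagmappings' file name below is an assumption.
    from collections import defaultdict

    def load_flagmappings(path):
        """Return {category: [option, ...]} parsed from an 'option category' file."""
        groups = defaultdict(list)
        with open(path) as handle:
            for line in handle:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                option, _, category = line.partition(' ')
                groups[category or 'unknown'].append(option)
        return groups

    if __name__ == '__main__':
        # Print a per-category count, e.g. "api: 9 options".
        for category, options in sorted(load_flagmappings('cinder.flagmappings').items()):
            print('%s: %d options' % (category, len(options)))
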