<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for volumes</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<th>Configuration option = Default value</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<th colspan="2">[DEFAULT]</th>
</tr>
<tr>
<td>cinder_api_insecure = False</td>
<td>(BoolOpt) Allow insecure SSL requests to cinder</td>
</tr>
<tr>
<td>cinder_ca_certificates_file = None</td>
<td>(StrOpt) Location of the CA certificates file to use for cinder client requests.</td>
</tr>
<tr>
<td>cinder_catalog_info = volume:cinder:publicURL</td>
<td>(StrOpt) Info to match when looking for cinder in the service catalog. Format is colon-separated values of the form: &lt;service_type&gt;:&lt;service_name&gt;:&lt;endpoint_type&gt;</td>
</tr>
<tr>
<td>cinder_cross_az_attach = True</td>
<td>(BoolOpt) Allow attaching a volume to an instance in a different availability zone.</td>
</tr>
<tr>
<td>cinder_endpoint_template = None</td>
<td>(StrOpt) Override the service catalog lookup with a template for the cinder endpoint, for example http://localhost:8776/v1/%(project_id)s</td>
</tr>
<tr>
<td>cinder_http_retries = 3</td>
<td>(IntOpt) Number of cinderclient retries on failed HTTP calls</td>
</tr>
<tr>
<td>os_region_name = None</td>
<td>(StrOpt) Region name of this node</td>
</tr>
<tr>
<td>volume_api_class = nova.volume.cinder.API</td>
<td>(StrOpt) The full class name of the volume API class to use</td>
</tr>
<tr>
<td>volume_usage_poll_interval = 0</td>
<td>(IntOpt) Interval in seconds for gathering volume usages</td>
</tr>
<tr>
<th colspan="2">[baremetal]</th>
</tr>
<tr>
<td>iscsi_iqn_prefix = iqn.2010-10.org.openstack.baremetal</td>
<td>(StrOpt) iSCSI IQN prefix used in baremetal volume connections.</td>
</tr>
<tr>
<td>volume_driver = nova.virt.baremetal.volume_driver.LibvirtVolumeDriver</td>
<td>(StrOpt) Baremetal volume driver.</td>
</tr>
<tr>
<th colspan="2">[hyperv]</th>
</tr>
<tr>
<td>force_volumeutils_v1 = False</td>
<td>(BoolOpt) Force use of the V1 volume utility class</td>
</tr>
<tr>
<td>volume_attach_retry_count = 10</td>
<td>(IntOpt) The number of times to retry to attach a volume</td>
</tr>
<tr>
<td>volume_attach_retry_interval = 5</td>
<td>(IntOpt) Interval between volume attachment attempts, in seconds</td>
</tr>
<tr>
<th colspan="2">[libvirt]</th>
</tr>
<tr>
<td>glusterfs_mount_point_base = $state_path/mnt</td>
<td>(StrOpt) Directory where the GlusterFS volume is mounted on the compute node</td>
</tr>
<tr>
<td>nfs_mount_options = None</td>
<td>(StrOpt) Mount options passed to the NFS client. See the nfs man page for details</td>
</tr>
<tr>
<td>nfs_mount_point_base = $state_path/mnt</td>
<td>(StrOpt) Directory where the NFS volume is mounted on the compute node</td>
</tr>
<tr>
<td>num_aoe_discover_tries = 3</td>
<td>(IntOpt) Number of times to rediscover the AoE target to find the volume</td>
</tr>
<tr>
<td>num_iscsi_scan_tries = 3</td>
<td>(IntOpt) Number of times to rescan the iSCSI target to find the volume</td>
</tr>
<tr>
<td>num_iser_scan_tries = 3</td>
<td>(IntOpt) Number of times to rescan the iSER target to find the volume</td>
</tr>
<tr>
<td>qemu_allowed_storage_drivers = </td>
<td>(ListOpt) Protocols listed here will be accessed directly from QEMU. Currently supported protocols: [gluster]</td>
</tr>
<tr>
<td>rbd_secret_uuid = None</td>
<td>(StrOpt) The libvirt UUID of the secret for rbd_user volumes</td>
</tr>
<tr>
<td>rbd_user = None</td>
<td>(StrOpt) The RADOS client name for accessing rbd volumes</td>
</tr>
<tr>
<td>scality_sofs_config = None</td>
<td>(StrOpt) Path or URL to Scality SOFS configuration file</td>
</tr>
<tr>
<td>scality_sofs_mount_point = $state_path/scality</td>
<td>(StrOpt) Base directory where Scality SOFS is mounted</td>
</tr>
<tr>
<th colspan="2">[xenserver]</th>
</tr>
<tr>
<td>block_device_creation_timeout = 10</td>
<td>(IntOpt) Time in seconds to wait for a block device to be created</td>
</tr>
</tbody>
</table>
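The following minimal <filename>nova.conf</filename> sketch shows how the
<literal>[DEFAULT]</literal> cinder options above fit together. It is for
illustration only; the region name is a placeholder value, and the remaining
lines restate the defaults from the table.
<programlisting language="ini"># Illustrative sketch only; os_region_name is a placeholder value
[DEFAULT]
volume_api_class = nova.volume.cinder.API
cinder_catalog_info = volume:cinder:publicURL
cinder_http_retries = 3
cinder_api_insecure = False
cinder_cross_az_attach = True
os_region_name = RegionOne</programlisting>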
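Similarly, a sketch of the <literal>[libvirt]</literal> options for NFS- and
RBD-backed volumes. The mount options, RADOS user, and secret UUID below are
hypothetical examples, not recommended values.
<programlisting language="ini"># Hypothetical example values for illustration only
[libvirt]
nfs_mount_point_base = $state_path/mnt
nfs_mount_options = vers=3
rbd_user = cinder
rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337</programlisting>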
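Finally, the driver-specific groups; every value below simply restates the
default shown in the table.
<programlisting language="ini"># Defaults restated for illustration
[baremetal]
volume_driver = nova.virt.baremetal.volume_driver.LibvirtVolumeDriver

[hyperv]
volume_attach_retry_count = 10
volume_attach_retry_interval = 5

[xenserver]
block_device_creation_timeout = 10</programlisting>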
</para>