Clean up variables to use correct <replaceable>XXX</replaceable> formatting.
Fix typos, white space, and other issues.

Change-Id: I61bae90144caf9dbc0149990ff92eb0c0eebec24
Author: Diane Fleming <dfleming@austin.rr.com>
@@ -1,9 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<book xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="openstack-compute-admin-manual-grizzly">
<book xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="openstack-compute-admin-manual-grizzly">
<title>OpenStack Cloud Administrator Guide</title>
<?rax title.font.size="28px" subtitle.font.size="28px"?>
<titleabbrev>Cloud Administrator Guide</titleabbrev>
@@ -27,35 +25,39 @@
<pubdate/>
<legalnotice role="apache2">
<annotation>
<remark>Copyright details are filled in by the
template.</remark>
<remark>Copyright details are filled in by the template.</remark>
</annotation>
</legalnotice>
<legalnotice role="cc-by-sa">
<annotation>
<remark>Remaining licensing details are filled in by
the template.</remark>
<remark>Remaining licensing details are filled in by the template.</remark>
</annotation>
</legalnotice>
<abstract>
<para>OpenStack offers open source software for cloud
administrators to manage and troubleshoot an OpenStack
cloud.</para>
<para>OpenStack offers open source software for cloud administrators to manage and
troubleshoot an OpenStack cloud.</para>
</abstract>
<revhistory>
<!-- ... continue adding more revisions here as you change this document using the markup shown below... -->
<revision>
<date>2014-07-21</date>
<revdescription>
<itemizedlist>
<listitem>
<para>Updated variables to use correct formatting.</para>
</listitem>
</itemizedlist>
</revdescription>
</revision>
<revision>
<date>2014-04-17</date>
<revdescription>
<itemizedlist>
<listitem>
<para>For the Icehouse release, the guide
was organized with system
administration and system architecture
sections. Also, how-to sections were
moved to this guide instead of the
<citetitle>OpenStack
Configuration Reference</citetitle>.
<para>For the Icehouse release, the guide was organized with system
administration and system architecture sections. Also, how-to
sections were moved to this guide instead of the
<citetitle>OpenStack Configuration Reference</citetitle>.
</para>
</listitem>
</itemizedlist>
@@ -66,9 +68,8 @@
<revdescription>
<itemizedlist>
<listitem>
<para>Adds options for tuning operational
status synchronization in the NSX
plug-in.</para>
<para>Adds options for tuning operational status synchronization in the
NSX plug-in.</para>
</listitem>
</itemizedlist>
</revdescription>
@@ -88,12 +89,10 @@
<revdescription>
<itemizedlist spacing="compact">
<listitem>
<para>Moves object storage monitoring
section to this guide.</para>
<para>Moves object storage monitoring section to this guide.</para>
</listitem>
<listitem>
<para>Removes redundant object storage
information.</para>
<para>Removes redundant object storage information.</para>
</listitem>
</itemizedlist>
</revdescription>
@@ -103,26 +102,21 @@
<revdescription>
<itemizedlist spacing="compact">
<listitem>
<para>Moved all but configuration and
installation information from these
component guides to create the new
guide:</para>
<para>Moved all but configuration and installation information from
these component guides to create the new guide:</para>
<itemizedlist>
<listitem>
<para>OpenStack Compute
Administration Guide</para>
<para>OpenStack Compute Administration Guide</para>
</listitem>
<listitem>
<para>OpenStack Networking
Administration Guide</para>
<para>OpenStack Networking Administration Guide</para>
</listitem>
<listitem>
<para>OpenStack Object Storage
Administration Guide</para>
<para>OpenStack Object Storage Administration Guide</para>
</listitem>
<listitem>
<para>OpenStack Block Storage
Service Administration Guide</para>
<para>OpenStack Block Storage Service Administration
Guide</para>
</listitem>
</itemizedlist>
</listitem>

@@ -74,7 +74,7 @@
reserved for the snapshot volume, the name of
the snapshot, and the path of an already
existing volume. Generally, this path is
<filename>/dev/cinder-volumes/<replaceable>$volume_name</replaceable></filename>.</para>
<filename>/dev/cinder-volumes/<replaceable>VOLUME_NAME</replaceable></filename>.</para>
<para>The size does not have to be the same as the
volume of the snapshot. The
<parameter>size</parameter> parameter
@@ -84,7 +84,6 @@
volume, even if the whole space is not
currently used by the snapshot.</para>
</step>
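<!-- Editor's note: the following example is an illustrative sketch, not part of
     this change; the 10GB size and snapshot name are hypothetical values for the
     size, name, and volume-path parameters described above. -->
<screen><prompt>#</prompt> <userinput>lvcreate --size 10G --snapshot --name <replaceable>SNAPSHOT_NAME</replaceable> /dev/cinder-volumes/<replaceable>VOLUME_NAME</replaceable></userinput></screen>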
<step>
<para>Run the <command>lvdisplay</command> command
again to verify the snapshot:</para>

@@ -1,197 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="glusterfs_backend">
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="glusterfs_backend">
<title>Configure a GlusterFS back end</title>
<para>
This section explains how to configure OpenStack Block Storage
to use GlusterFS as a back end. You must be able to access the
GlusterFS shares from the server that hosts the
<systemitem class="service">cinder</systemitem> volume
service.
</para>
<note>
<para os="rhel;centos;fedora;opensuse;sles">The
<systemitem class="service">cinder</systemitem> volume service
is named <literal>openstack-cinder-volume</literal> on the
following distributions:</para>
<para>This section explains how to configure OpenStack Block Storage to use GlusterFS as a back
end. You must be able to access the GlusterFS shares from the server that hosts the
<systemitem class="service">cinder</systemitem> volume service.</para>
<note>
<para os="rhel;centos;fedora;opensuse;sles">The <systemitem class="service"
>cinder</systemitem> volume service is named
<literal>openstack-cinder-volume</literal> on the following distributions:</para>
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
<listitem os="rhel;centos;fedora"><para>CentOS</para></listitem>
<listitem os="rhel;centos;fedora"><para>Fedora</para></listitem>
<listitem os="opensuse;sles"><para>openSUSE</para></listitem>
<listitem os="rhel;centos;fedora"><para>Red Hat Enterprise
Linux</para></listitem>
<listitem os="opensuse;sles"><para>SUSE Linux Enterprise
</para></listitem>
</itemizedlist>
<para>In Ubuntu and Debian distributions, the
<systemitem class="service">cinder</systemitem> volume
service is named <literal>cinder-volume</literal>.</para>
</note>
<para>
Mounting GlusterFS volumes requires utilities and libraries
from the <package>glusterfs-fuse</package> package. This
package must be installed on all systems that will access
volumes backed by GlusterFS.
</para>
<note os="ubuntu;debian">
<para>
The utilities and libraries required for mounting GlusterFS
volumes on Ubuntu and Debian distributions are available from
the <package>glusterfs-client</package> package instead.
</para>
</note>
<para>For information on how to install and configure
GlusterFS, refer to the
<link xlink:href="http://gluster.org/community/documentation/index.php/Main_Page">Gluster Documentation</link>
page.</para>
<procedure>
<title>Configure GlusterFS for OpenStack Block Storage</title>
<para>
The GlusterFS server must also be configured accordingly in
order to allow OpenStack Block Storage to use GlusterFS shares:
</para>
<step>
<para>
Log in as <systemitem>root</systemitem> to the
GlusterFS server.
</para>
</step>
<step>
<para>
Set each Gluster volume to use the same UID and GID as
the <systemitem>cinder</systemitem> user:
</para>
<screen><prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> storage.owner-uid <replaceable>cinder-uid</replaceable></userinput>
<prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> storage.owner-gid <replaceable>cinder-gid</replaceable></userinput></screen>
<para>Where:</para>
<itemizedlist>
<listitem><para><replaceable>VOL_NAME</replaceable> is the
Gluster volume name.</para></listitem>
<listitem><para><replaceable>cinder-uid</replaceable> is the UID of the <systemitem>cinder</systemitem> user.</para></listitem>
<listitem><para><replaceable>cinder-gid</replaceable> is the GID of the <systemitem>cinder</systemitem> user.</para></listitem>
</itemizedlist>
<note>
<para>
The default UID and GID of the
<systemitem>cinder</systemitem> user is
<literal>165</literal> on most distributions.
</para>
</note>
</step>
<step>
<para>
Configure each Gluster volume to accept
<systemitem>libgfapi</systemitem> connections. To do
this, set each Gluster volume to allow insecure ports:
</para>
<screen><prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> server.allow-insecure on</userinput></screen>
</step>
<step>
<para>
Enable client connections from unprivileged ports.
To do this, add the following line to
<filename>/etc/glusterfs/glusterd.vol</filename>:
</para>
<programlisting>option rpc-auth-allow-insecure on</programlisting>
</step>
<step>
<para>
Restart the <systemitem>glusterd</systemitem> service:
</para>
<screen><prompt>#</prompt> <userinput>service glusterd restart</userinput></screen>
</step>
</procedure>
<procedure>
<title>Configure Block Storage to use a GlusterFS back end</title>
<para>
After you configure the GlusterFS service, complete these
steps:
</para>
<step>
<para>
Log in as <systemitem>root</systemitem> to the system
hosting the cinder volume service.
</para>
</step>
<step>
<para>
Create a text file named
<filename>glusterfs</filename>
in <filename>/etc/cinder/</filename>.</para>
</step>
<step>
<para>Add an entry to
<filename>/etc/cinder/glusterfs</filename> for each
GlusterFS share that OpenStack Block Storage should use
for back end storage. Each entry should be a separate line,
and should use the following format:</para>
<programlisting><replaceable>HOST</replaceable>:/<replaceable>VOL_NAME</replaceable></programlisting>
<para>
Where:
</para>
<itemizedlist>
<listitem>
<para>
<replaceable>HOST</replaceable> is the IP address
or host name of the Red Hat Storage server.
</para>
</listitem>
<listitem>
<para>
<replaceable>VOL_NAME</replaceable> is the name of an
existing and accessible volume on the GlusterFS
server.
</para>
</listitem>
</itemizedlist>
<para>
Optionally, if your environment requires additional
mount options for a share, you can add them to the
share's entry:
</para>
<programlisting><replaceable>HOST</replaceable>:/<replaceable>VOL_NAME</replaceable> -o <replaceable>OPTIONS</replaceable></programlisting>
<para>
Replace <replaceable>OPTIONS</replaceable> with a
comma-separated list of mount options.
</para>
</step>
<step>
<para>Set <filename>/etc/cinder/glusterfs</filename> to be
owned by the <systemitem>root</systemitem> user and the
<systemitem>cinder</systemitem> group.
</para>
<screen><prompt>#</prompt> <userinput>chown root:cinder /etc/cinder/glusterfs</userinput></screen>
</step>
<step>
<para>Set <filename>/etc/cinder/glusterfs</filename> to be
readable by members of the <systemitem>cinder</systemitem>
group:
</para>
<screen><prompt>#</prompt> <userinput>chmod 0640 <replaceable>FILE</replaceable></userinput></screen>
</step>
<step>
<para>Configure OpenStack Block Storage to use the
<filename>/etc/cinder/glusterfs</filename> file created
earlier. To do so, open the
<filename>/etc/cinder/cinder.conf</filename> configuration
file and set the
<literal>glusterfs_shares_config</literal> configuration
key to <filename>/etc/cinder/glusterfs</filename>.
</para>
<para os="rhel;centos;fedora;opensuse;sles">On
distributions that include
<application>openstack-config</application>, you can
configure this by running the following command instead:
</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
DEFAULT glusterfs_shares_config /etc/cinder/glusterfs</userinput></screen>
<para os="rhel;centos;fedora;opensuse;sles">The following
distributions include
<application>openstack-config</application>:</para>
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
<listitem os="rhel;centos;fedora">
<para>CentOS</para>
</listitem>
@@ -207,80 +25,195 @@ DEFAULT glusterfs_shares_config /etc/cinder/glusterfs</userinput></screen>
<listitem os="opensuse;sles">
<para>SUSE Linux Enterprise</para>
</listitem>
</itemizedlist>
<para>In Ubuntu and Debian distributions, the <systemitem class="service"
>cinder</systemitem> volume service is named
<literal>cinder-volume</literal>.</para>
</note>
<para>Mounting GlusterFS volumes requires utilities and libraries from the
<package>glusterfs-fuse</package> package. This package must be installed on all systems
that will access volumes backed by GlusterFS.</para>
<note os="ubuntu;debian">
<para>The utilities and libraries required for mounting GlusterFS volumes on Ubuntu and
Debian distributions are available from the <package>glusterfs-client</package> package
instead.</para>
</note>
<para>For information on how to install and configure GlusterFS, refer to the <link
xlink:href="http://gluster.org/community/documentation/index.php/Main_Page"
>Gluster Documentation</link> page.</para>
<procedure>
<title>Configure GlusterFS for OpenStack Block Storage</title>
<para>The GlusterFS server must also be configured accordingly in order to allow OpenStack
Block Storage to use GlusterFS shares:</para>
<step>
<para>Log in as <systemitem>root</systemitem> to the GlusterFS server.</para>
</step>
<step>
<para>Set each Gluster volume to use the same UID and GID as the
<systemitem>cinder</systemitem> user:</para>
<screen><prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> storage.owner-uid <replaceable>CINDER_UID</replaceable></userinput>
<prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> storage.owner-gid <replaceable>CINDER_GID</replaceable></userinput></screen>
<para>Where:</para>
<itemizedlist>
<listitem>
<para><replaceable>VOL_NAME</replaceable> is the Gluster volume name.</para>
</listitem>
<listitem>
<para><replaceable>CINDER_UID</replaceable> is the UID of the
<systemitem>cinder</systemitem> user.</para>
</listitem>
<listitem>
<para><replaceable>CINDER_GID</replaceable> is the GID of the
<systemitem>cinder</systemitem> user.</para>
</listitem>
</itemizedlist>
<note>
<para>The default UID and GID of the <systemitem>cinder</systemitem> user is
<literal>165</literal> on most distributions.</para>
</note>
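<!-- Editor's note: a hedged sketch, not in this change; you could confirm the
     actual UID and GID on the cinder host with the standard id command. The
     output shown assumes the default value of 165 mentioned above. -->
<screen><prompt>#</prompt> <userinput>id cinder</userinput>
<computeroutput>uid=165(cinder) gid=165(cinder) groups=165(cinder)</computeroutput></screen>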
</step>
<step>
<para>Configure each Gluster volume to accept <systemitem>libgfapi</systemitem>
connections. To do this, set each Gluster volume to allow insecure ports:</para>
<screen><prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> server.allow-insecure on</userinput></screen>
</step>
<step>
<para>Enable client connections from unprivileged ports. To do this, add the following
line to <filename>/etc/glusterfs/glusterd.vol</filename>:</para>
<programlisting>option rpc-auth-allow-insecure on</programlisting>
</step>
<step>
<para>Restart the <systemitem>glusterd</systemitem> service:</para>
<screen><prompt>#</prompt> <userinput>service glusterd restart</userinput></screen>
</step>
</procedure>
<procedure>
<title>Configure Block Storage to use a GlusterFS back end</title>
<para>After you configure the GlusterFS service, complete these steps:</para>
<step>
<para>Log in as <systemitem>root</systemitem> to the system hosting the cinder volume
service.</para>
</step>
<step>
<para>Create a text file named <filename>glusterfs</filename> in
<filename>/etc/cinder/</filename>.</para>
</step>
<step>
<para>Add an entry to <filename>/etc/cinder/glusterfs</filename> for each GlusterFS
share that OpenStack Block Storage should use for back end storage. Each entry
should be a separate line, and should use the following format:</para>
<programlisting><replaceable>HOST</replaceable>:/<replaceable>VOL_NAME</replaceable></programlisting>
<para>Where:</para>
<itemizedlist>
<listitem>
<para>
<replaceable>HOST</replaceable> is the IP address or host name of the Red
Hat Storage server.</para>
</listitem>
<listitem>
<para>
<replaceable>VOL_NAME</replaceable> is the name of an existing and accessible
volume on the GlusterFS server.</para>
</listitem>
</itemizedlist>
<para>Optionally, if your environment requires additional mount options for a share, you
can add them to the share's entry:</para>
<programlisting><replaceable>HOST</replaceable>:/<replaceable>VOL_NAME</replaceable> -o <replaceable>OPTIONS</replaceable></programlisting>
<para>Replace <replaceable>OPTIONS</replaceable> with a comma-separated list of mount
options.</para>
</step>
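<!-- Editor's note: an illustrative sketch, not in this change; the addresses,
     volume names, and mount option below are hypothetical examples of the two
     entry formats just described. -->
<programlisting>192.168.1.21:/glustervol1
192.168.1.22:/glustervol2 -o backup-volfile-servers=192.168.1.21</programlisting>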
<step>
<para>Set <filename>/etc/cinder/glusterfs</filename> to be owned by the
<systemitem>root</systemitem> user and the <systemitem>cinder</systemitem>
group.</para>
<screen><prompt>#</prompt> <userinput>chown root:cinder /etc/cinder/glusterfs</userinput></screen>
</step>
<step>
<para>Set <filename>/etc/cinder/glusterfs</filename> to be readable by members of the
<systemitem>cinder</systemitem> group:</para>
<screen><prompt>#</prompt> <userinput>chmod 0640 <replaceable>FILE</replaceable></userinput></screen>
</step>
<step>
<para>Configure OpenStack Block Storage to use the
<filename>/etc/cinder/glusterfs</filename> file created earlier. To do so, open
the <filename>/etc/cinder/cinder.conf</filename> configuration file and set the
<literal>glusterfs_shares_config</literal> configuration key to
<filename>/etc/cinder/glusterfs</filename>.</para>
<para os="rhel;centos;fedora;opensuse;sles">On distributions that include
<application>openstack-config</application>, you can configure this by running
the following command instead:</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
DEFAULT glusterfs_shares_config /etc/cinder/glusterfs</userinput></screen>
<para os="rhel;centos;fedora;opensuse;sles">The following distributions include
<application>openstack-config</application>:</para>
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
<listitem os="rhel;centos;fedora">
<para>CentOS</para>
</listitem>
<listitem os="rhel;centos;fedora">
<para>Fedora</para>
</listitem>
<listitem os="opensuse;sles">
<para>openSUSE</para>
</listitem>
<listitem os="rhel;centos;fedora">
<para>Red Hat Enterprise Linux</para>
</listitem>
<listitem os="opensuse;sles">
<para>SUSE Linux Enterprise</para>
</listitem>
</itemizedlist>
</step>
<step>
<para>Configure OpenStack Block Storage to use the correct
volume driver, namely
<literal>cinder.volume.drivers.glusterfs</literal>. To do
so, open the <filename>/etc/cinder/cinder.conf</filename>
configuration file and set the
<literal>volume_driver</literal> configuration key to
<literal>cinder.volume.drivers.glusterfs</literal>.
</para>
<para os="rhel;centos;fedora;opensuse;sles">On
distributions that include
<application>openstack-config</application>, you can
configure this by running the following command instead:
</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
<para>Configure OpenStack Block Storage to use the correct volume driver, namely
<literal>cinder.volume.drivers.glusterfs</literal>. To do so, open the
<filename>/etc/cinder/cinder.conf</filename> configuration file and set the
<literal>volume_driver</literal> configuration key to
<literal>cinder.volume.drivers.glusterfs</literal>.</para>
<para os="rhel;centos;fedora;opensuse;sles">On distributions that include
<application>openstack-config</application>, you can configure this by running
the following command instead:</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver</userinput></screen>
</step>
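<!-- Editor's note: a minimal sketch assembled from the two configuration keys
     named in this procedure, not taken verbatim from this change; after the
     previous steps the relevant /etc/cinder/cinder.conf lines would read: -->
<programlisting>[DEFAULT]
glusterfs_shares_config = /etc/cinder/glusterfs
volume_driver = cinder.volume.drivers.glusterfs.GlusterfsDriver</programlisting>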
<step><para>
You can now restart the service to apply the configuration.
</para>
<para os="rhel;centos;fedora;opensuse;sles">To restart the
<systemitem class="service">cinder</systemitem> volume service
on CentOS, Fedora, openSUSE, RedHat Enterprise Linux, or SUSE
Linux Enterprise, run:</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>service openstack-cinder-volume restart</userinput></screen>
<para os="debian;ubuntu">To restart the
<systemitem class="service">cinder</systemitem> volume service
on Ubuntu or Debian, run:</para>
<screen os="debian;ubuntu"><prompt>#</prompt> <userinput>service cinder-volume restart</userinput></screen></step>
<step>
<para>You can now restart the service to apply the configuration.</para>
<para os="rhel;centos;fedora;opensuse;sles">To restart the <systemitem class="service"
>cinder</systemitem> volume service on CentOS, Fedora, openSUSE, Red Hat
Enterprise Linux, or SUSE Linux Enterprise, run:</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>service openstack-cinder-volume restart</userinput></screen>
<para os="debian;ubuntu">To restart the <systemitem class="service">cinder</systemitem>
volume service on Ubuntu or Debian, run:</para>
<screen os="debian;ubuntu"><prompt>#</prompt> <userinput>service cinder-volume restart</userinput></screen>
</step>
</procedure>
<para>OpenStack Block Storage is now configured to use a GlusterFS
back end.</para>
<para>OpenStack Block Storage is now configured to use a GlusterFS back end.</para>
<note>
<para>
In <filename>/etc/cinder/cinder.conf</filename>, the
<literal>glusterfs_sparsed_volumes</literal>
configuration key determines whether volumes are
created as sparse files and grown as needed or fully
allocated up front. The default and recommended value
of this key is <literal>true</literal>, which ensures
volumes are initially created as sparse files.
</para>
<para>
Setting <literal>glusterfs_sparsed_volumes</literal>
to <literal>false</literal> will result in volumes
being fully allocated at the time of creation. This
leads to increased delays in volume creation.
</para>
<para>
However, should you choose to set
<literal>glusterfs_sparsed_volumes</literal> to
<literal>false</literal>, you can do so directly in
<filename>/etc/cinder/cinder.conf</filename>.</para>
<para os="rhel;centos;fedora;opensuse;sles">On
distributions that include
<application>openstack-config</application>, you can
configure this by running the following command instead:
</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
<para>In <filename>/etc/cinder/cinder.conf</filename>, the
<literal>glusterfs_sparsed_volumes</literal> configuration key determines whether
volumes are created as sparse files and grown as needed or fully allocated up front. The
default and recommended value of this key is <literal>true</literal>, which ensures
volumes are initially created as sparse files.</para>
<para>Setting <literal>glusterfs_sparsed_volumes</literal> to <literal>false</literal> will
result in volumes being fully allocated at the time of creation. This leads to increased
delays in volume creation.</para>
<para>However, should you choose to set <literal>glusterfs_sparsed_volumes</literal> to
<literal>false</literal>, you can do so directly in
<filename>/etc/cinder/cinder.conf</filename>.</para>
<para os="rhel;centos;fedora;opensuse;sles">On distributions that include
<application>openstack-config</application>, you can configure this by running the
following command instead:</para>
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
DEFAULT glusterfs_sparsed_volumes false</userinput></screen>
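<!-- Editor's note: the equivalent direct edit, shown as a hedged sketch rather
     than text from this change: -->
<programlisting>[DEFAULT]
glusterfs_sparsed_volumes = false</programlisting>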
</note>
<important>
<para>If a client host has SELinux enabled, the
<systemitem>virt_use_fusefs</systemitem> Boolean should also be
enabled if the host requires access to GlusterFS volumes on
an instance. To enable this Boolean, run the following command
as the <systemitem>root</systemitem> user:
</para>
<para>If a client host has SELinux enabled, the <systemitem>virt_use_fusefs</systemitem>
Boolean should also be enabled if the host requires access to GlusterFS volumes on an
instance. To enable this Boolean, run the following command as the
<systemitem>root</systemitem> user:</para>
<screen><prompt>#</prompt> <userinput>setsebool -P virt_use_fusefs on</userinput></screen>
<para>This command also makes the Boolean persistent across
reboots. Run this command on all client hosts that require
access to GlusterFS volumes on an instance. This includes all
compute nodes.</para>
<para>This command also makes the Boolean persistent across reboots. Run this command on all
client hosts that require access to GlusterFS volumes on an instance. This includes all
compute nodes.</para>
</important>
</section>

@@ -1,42 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="volume-backup-restore-export-import">
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="volume-backup-restore-export-import">
<title>Export and import backup metadata</title>
<para>A volume backup can only be restored on the same Block
Storage service. This is because restoring a volume from a backup
requires metadata available on the database used by the Block
Storage service.</para>
<note><para>For information on how to back up and restore a
volume, see <xref linkend="volume-backup-restore"/>.</para></note>
<para>You can, however, export the metadata of a volume backup. To
do so, run this command as an OpenStack <literal>admin</literal>
user (presumably, after creating a volume backup):
</para>
<screen><prompt>$</prompt> <userinput>cinder backup-export <replaceable>backup_ID</replaceable></userinput></screen>
<para>Where <replaceable>backup_ID</replaceable> is the volume
backup's ID. This command should return the backup's corresponding
database information as encoded string metadata.</para>
<para>Exporting and storing this encoded string metadata allows
you to completely restore the backup, even in the event of a
catastrophic database failure. This will preclude the need to
back up the entire Block Storage database, particularly if you
only need to keep complete backups of a small subset of
volumes.</para>
<para>In addition, having a volume backup and its backup metadata also
provides volume portability. Specifically, backing up a volume and
exporting its metadata will allow you to restore the volume on a
completely different Block Storage database, or even on a different
cloud service. To do so, first import the backup metadata to the Block
Storage database and then restore the backup.</para>
<para>To import backup metadata, run the following command as an
OpenStack <literal>admin</literal>:</para>
<screen><prompt>$</prompt> <userinput>cinder backup-import <replaceable>metadata</replaceable></userinput></screen>
<para>Where <replaceable>metadata</replaceable> is the backup
metadata exported earlier.</para>
<para>Once you have imported the backup metadata into a Block Storage
database, restore the volume (<xref
linkend="volume-backup-restore"/>).</para>
<para>A volume backup can only be restored on the same Block Storage service. This is because
restoring a volume from a backup requires metadata available on the database used by the
Block Storage service.</para>
<note>
<para>For information about how to back up and restore a volume, see <xref
linkend="volume-backup-restore"/>.</para>
</note>
<para>You can, however, export the metadata of a volume backup. To do so, run this command as an
OpenStack <literal>admin</literal> user (presumably, after creating a volume backup):</para>
<screen><prompt>$</prompt> <userinput>cinder backup-export <replaceable>BACKUP_ID</replaceable></userinput></screen>
<para>Where <replaceable>BACKUP_ID</replaceable> is the volume backup's ID. This command should
return the backup's corresponding database information as encoded string metadata.</para>
<para>Exporting and storing this encoded string metadata allows you to completely restore the
backup, even in the event of a catastrophic database failure. This will preclude the need to
back up the entire Block Storage database, particularly if you only need to keep complete
backups of a small subset of volumes.</para>
<para>In addition, having a volume backup and its backup metadata also provides volume
portability. Specifically, backing up a volume and exporting its metadata will allow you to
restore the volume on a completely different Block Storage database, or even on a different
cloud service. To do so, first import the backup metadata to the Block Storage database and
then restore the backup.</para>
<para>To import backup metadata, run the following command as an OpenStack
<literal>admin</literal>:</para>
<screen><prompt>$</prompt> <userinput>cinder backup-import <replaceable>METADATA</replaceable></userinput></screen>
<para>Where <replaceable>METADATA</replaceable> is the backup metadata exported earlier.</para>
<para>Once you have imported the backup metadata into a Block Storage database, restore the
volume (<xref linkend="volume-backup-restore"/>).</para>
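<!-- Editor's note: a hedged sketch, not in this change, tying the commands in
     this section together as one possible end-to-end sequence: -->
<screen><prompt>$</prompt> <userinput>cinder backup-create <replaceable>VOLUME</replaceable></userinput>
<prompt>$</prompt> <userinput>cinder backup-export <replaceable>BACKUP_ID</replaceable></userinput>
<prompt>$</prompt> <userinput>cinder backup-import <replaceable>METADATA</replaceable></userinput>
<prompt>$</prompt> <userinput>cinder backup-restore <replaceable>BACKUP_ID</replaceable></userinput></screen>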
</section>

@@ -1,32 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="volume-backup-restore">
<title>Back up and restore volumes</title>
<para>The <command>cinder</command> command-line interface
provides the tools for creating a volume backup. You can restore
a volume from a backup as long as the backup's associated database
information (or backup metadata) is intact in the Block Storage
database.
</para>
<para>Run this command to create a backup of a volume:</para>
<screen><prompt>$</prompt> <userinput>cinder backup-create <replaceable>VOLUME</replaceable></userinput></screen>
<para>Where <replaceable>VOLUME</replaceable> is the name or ID of
the volume.</para>
<para>The previous command will also return a backup ID. Use this
backup ID when restoring the volume, as in:</para>
<screen><prompt>$</prompt> <userinput>cinder backup-restore <replaceable>backup_ID</replaceable></userinput></screen>
<para>As mentioned earlier, volume backups are dependent on the
Block Storage database. Because of this, we recommend that you
also back up your Block Storage database regularly in order to
ensure data recovery.</para>
<note><para>Alternatively, you can export and save the metadata of
selected volume backups. Doing so will preclude the need to
back up the entire Block Storage database. This is particularly
useful if you only need a small subset of volumes to survive a
catastrophic database failure.</para>
<para>For more information on how to export and import volume
backup metadata, see <xref linkend="volume-backup-restore-export-import"/>.</para></note>
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="volume-backup-restore">
<title>Back up and restore volumes</title>
<para>The <command>cinder</command> command-line interface provides the tools for creating a
volume backup. You can restore a volume from a backup as long as the backup's associated
database information (or backup metadata) is intact in the Block Storage database.</para>
<para>Run this command to create a backup of a volume:</para>
<screen><prompt>$</prompt> <userinput>cinder backup-create <replaceable>VOLUME</replaceable></userinput></screen>
<para>Where <replaceable>VOLUME</replaceable> is the name or ID of the volume.</para>
<para>This command also returns a backup ID. Use this backup ID when restoring the volume:</para>
<screen><prompt>$</prompt> <userinput>cinder backup-restore <replaceable>BACKUP_ID</replaceable></userinput></screen>
<para>Because volume backups are dependent on the Block Storage database, you must also back up
your Block Storage database regularly to ensure data recovery.</para>
<note>
<para>Alternatively, you can export and save the metadata of selected volume backups. Doing so
precludes the need to back up the entire Block Storage database. This is useful if you need
only a small subset of volumes to survive a catastrophic database failure.</para>
<para>For more information about how to export and import volume backup metadata, see <xref
linkend="volume-backup-restore-export-import"/>.</para>
</note>
</section>

@@ -1,40 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="ch_install-dashboard">
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="ch_install-dashboard">
<title>Dashboard</title>
<para>The
OpenStack dashboard is a web-based interface that allows you to manage
OpenStack resources and services. The dashboard allows you to interact
with the OpenStack Compute cloud controller using the OpenStack APIs.
For more information about installing and configuring the dashboard,
see the <citetitle>OpenStack Installation Guide</citetitle> for your
operating system.
</para>
<para>
For more information about using the dashboard, see:
<para>The OpenStack dashboard is a web-based interface that allows you to manage OpenStack
resources and services. The dashboard allows you to interact with the OpenStack Compute
cloud controller using the OpenStack APIs. For more information about installing and
configuring the dashboard, see the <citetitle>OpenStack Installation Guide</citetitle> for
your operating system.</para>
<para>Dashboard resources:</para>
<itemizedlist>
<listitem>
<para><xref linkend="dashboard-custom-brand"/>, for customizing
the dashboard.</para>
<para>To customize the dashboard, see <xref linkend="dashboard-custom-brand"/>.</para>
</listitem>
<listitem>
<para><xref linkend="dashboard-sessions"/>, for setting up session
storage for the dashboard.</para>
<para>To set up session storage for the dashboard, see <xref
linkend="dashboard-sessions"/>.</para>
</listitem>
<listitem>
<para>The <link xlink:href="http://docs.openstack.org/developer/horizon/topics/deployment.html">
Horizon documentation</link>, for deploying the dashboard.</para>
<para>To deploy the dashboard, see the <link
xlink:href="http://docs.openstack.org/developer/horizon/topics/deployment.html">
Horizon documentation</link>.</para>
</listitem>
<listitem xml:id="launch_instances">
<para>The <link xlink:href="http://docs.openstack.org/user-guide/content/">
<citetitle>OpenStack End User Guide</citetitle></link>, for launching
instances with the dashboard..</para>
<para>To launch instances with the dashboard, see the <link
xlink:href="http://docs.openstack.org/user-guide/content/">
<citetitle>OpenStack End User Guide</citetitle></link>.</para>
</listitem>
</itemizedlist>
</para>
<xi:include href="../common/section_dashboard_customizing.xml"/>
<xi:include href="../common/section_dashboard_sessions.xml"/>
</chapter>

@@ -1,7 +1,5 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="section_configuring-compute-to-use-ipv6-addresses">
<title>Configure Compute to use IPv6 addresses</title>
<para>If you are using OpenStack Compute with <systemitem>nova-network</systemitem>, you can put
@@ -13,43 +11,49 @@
project uses a different 64-bit global routing prefix. In <literal>FlatDHCPManager</literal>,
all instances use one 64-bit global routing prefix.</para>
<para>This configuration was tested with VM images that have an IPv6 stateless address auto
configuration capability. This capability is required for any VM you want to run with
an IPv6 address. You must use EUI-64 address for stateless address auto configuration.
Each node that executes a <literal>nova-*</literal> service must have
<literal>python-netaddr</literal> and <literal>radvd</literal> installed.
</para>
configuration capability. This capability is required for any VM you want to run with an IPv6
address. You must use EUI-64 address for stateless address auto configuration. Each node that
executes a <literal>nova-*</literal> service must have <literal>python-netaddr</literal> and
<literal>radvd</literal> installed.</para>
<procedure>
<title>Switch into IPv4/IPv6 dual-stack mode</title>
<step><para>On all nodes running a <literal>nova-*</literal> service, install
<step>
<para>On all nodes running a <literal>nova-*</literal> service, install
<systemitem>python-netaddr</systemitem>:</para>
<screen><prompt>#</prompt> <userinput>apt-get install python-netaddr</userinput></screen></step>
<step><para>On all <literal>nova-network</literal> nodes, install <literal>radvd</literal> and configure
IPv6 networking:</para>
<screen><prompt>#</prompt> <userinput>apt-get install python-netaddr</userinput></screen>
</step>
<step>
<para>On all <literal>nova-network</literal> nodes, install <literal>radvd</literal> and
configure IPv6 networking:</para>
<screen><prompt>#</prompt> <userinput>apt-get install radvd</userinput>
<prompt>#</prompt> <userinput>echo 1 > /proc/sys/net/ipv6/conf/all/forwarding</userinput>
<prompt>#</prompt> <userinput>echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra</userinput></screen></step>
<step><para>Edit the <filename>nova.conf</filename> file on all nodes to specify
<literal>use_ipv6 = True</literal>.</para></step>
<step><para>Restart all <literal>nova-*</literal> services.</para></step>
<prompt>#</prompt> <userinput>echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra</userinput></screen>
</step>
<step>
<para>Edit the <filename>nova.conf</filename> file on all nodes to specify <literal>use_ipv6 =
True</literal>.</para>
</step>
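<!-- Editor's note: a minimal sketch of the resulting nova.conf fragment,
     assuming the standard [DEFAULT] section; it is not shown in this change. -->
<programlisting>[DEFAULT]
use_ipv6 = True</programlisting>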
|
||||
<step>
|
||||
<para>Restart all <literal>nova-*</literal> services.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<note><para>You can add a fixed range for IPv6 addresses to the <command>nova network-create</command>
|
||||
<note>
|
||||
<para>You can add a fixed range for IPv6 addresses to the <command>nova network-create</command>
|
||||
command. Specify <option>public</option> or <option>private</option> after the
|
||||
<option>network-create</option> parameter.</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 <replaceable>fixed_range_v4</replaceable> --vlan <replaceable>vlan_id</replaceable> --vpn <replaceable>vpn_start</replaceable> --fixed-range-v6 <replaceable>fixed_range_v6</replaceable></userinput></screen>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 <replaceable>FIXED_RANGE_V4</replaceable> --vlan <replaceable>VLAN_ID</replaceable> --vpn <replaceable>VPN_START</replaceable> --fixed-range-v6 <replaceable>FIXED_RANGE_V6</replaceable></userinput></screen>
|
||||
<para>You can set IPv6 global routing prefix by using the <option>--fixed_range_v6</option>
|
||||
parameter. The default value for the parameter is: <literal>fd00::/48</literal>.
|
||||
</para>
|
||||
parameter. The default value for the parameter is <literal>fd00::/48</literal>.</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>When you use <literal>FlatDHCPManager</literal>, the command uses the original
|
||||
<option>--fixed_range_v6</option> value. For example:</para>
|
||||
<option>--fixed_range_v6</option> value. For example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 10.0.2.0/24 --fixed-range-v6 fd00:1::/48</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>When you use <literal>VlanManager</literal>, the command
|
||||
increments the subnet ID to create subnet prefixes.
|
||||
Guest VMs use this prefix to generate their IPv6 global
|
||||
unicast address. For example:</para>
|
||||
<para>When you use <literal>VlanManager</literal>, the command increments the subnet ID to
|
||||
create subnet prefixes. Guest VMs use this prefix to generate their IPv6 global unicast
|
||||
address. For example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 10.0.1.0/24 --vlan 100 --vpn 1000 --fixed-range-v6 fd00:1::/48</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
|
@ -1,14 +1,11 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="section_configuring-compute-migrations">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Configure migrations</title>
|
||||
<note>
|
||||
<para>Only cloud administrators can perform live migrations. If your cloud
|
||||
is configured to use cells, you can perform live migration
|
||||
within but not between cells.</para>
|
||||
<para>Only cloud administrators can perform live migrations. If your cloud is configured to use
|
||||
cells, you can perform live migration within but not between cells.</para>
|
||||
</note>
|
||||
<para>Migration enables an administrator to move a virtual-machine instance from one compute host
|
||||
to another. This feature is useful when a compute host requires maintenance. Migration can also
|
||||
@ -17,185 +14,166 @@
|
||||
<para>The migration types are:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Migration</emphasis> (or non-live
|
||||
migration). The instance is shut down (and the instance knows
|
||||
that it was rebooted) for a period of time to be moved to
|
||||
<para><emphasis role="bold">Migration</emphasis> (or non-live migration). The instance is shut
|
||||
down (and the instance knows that it was rebooted) for a period of time to be moved to
|
||||
another hypervisor.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Live migration</emphasis> (or true live migration). Almost no
|
||||
instance downtime. Useful when the instances must be kept running during the migration. The
|
||||
types of <firstterm>live migration</firstterm> are:
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Shared storage-based live
|
||||
migration</emphasis>. Both hypervisors have access to shared
|
||||
storage.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Block live migration</emphasis>. No
|
||||
shared storage is required. Incompatible with read-only devices
|
||||
such as CD-ROMs and <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/content/config-drive.html"
|
||||
>Configuration Drive (config_drive)</link>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Volume-backed live
|
||||
migration</emphasis>. When instances are backed by volumes
|
||||
rather than ephemeral disk, no shared storage is required, and
|
||||
migration is supported (currently only in libvirt-based
|
||||
hypervisors).</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
types of <firstterm>live migration</firstterm> are:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Shared storage-based live migration</emphasis>. Both
|
||||
hypervisors have access to shared storage.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Block live migration</emphasis>. No shared storage is
|
||||
required. Incompatible with read-only devices such as CD-ROMs and <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/content/config-drive.html"
|
||||
>Configuration Drive (config_drive)</link>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Volume-backed live migration</emphasis>. When instances are
|
||||
backed by volumes rather than ephemeral disk, no shared storage is required, and
|
||||
migration is supported (currently only in libvirt-based hypervisors).</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The following sections describe how to configure your hosts
|
||||
and compute nodes for migrations by using the KVM and XenServer
|
||||
hypervisors.</para>
|
||||
<para>The following sections describe how to configure your hosts and compute nodes for migrations
|
||||
by using the KVM and XenServer hypervisors.</para>
|
||||
<section xml:id="configuring-migrations-kvm-libvirt">
|
||||
<title>KVM-Libvirt</title>
|
||||
<itemizedlist>
|
||||
<title>Prerequisites</title>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Hypervisor:</emphasis> KVM with
|
||||
libvirt</para>
|
||||
<para><emphasis role="bold">Hypervisor:</emphasis> KVM with libvirt</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Shared storage:</emphasis>
|
||||
<filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename>
|
||||
(for example, <filename>/var/lib/nova/instances</filename>)
|
||||
has to be mounted by shared storage. This guide uses NFS but
|
||||
other options, including the <link
|
||||
xlink:href="http://gluster.org/community/documentation//index.php/OSConnect"
|
||||
>OpenStack Gluster Connector</link> are available.</para>
|
||||
<filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename> (for example,
|
||||
<filename>/var/lib/nova/instances</filename>) has to be mounted by shared storage. This
|
||||
guide uses NFS but other options, including the <link
|
||||
xlink:href="http://gluster.org/community/documentation//index.php/OSConnect">OpenStack
|
||||
Gluster Connector</link> are available.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Instances:</emphasis> Instance can
|
||||
be migrated with iSCSI based volumes</para>
|
||||
<para><emphasis role="bold">Instances:</emphasis> Instance can be migrated with iSCSI based
|
||||
volumes</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<note>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Because the Compute service does not use the libvirt
|
||||
live migration functionality by default, guests are
|
||||
suspended before migration and might experience several
|
||||
minutes of downtime. For details, see <xref
|
||||
linkend="true-live-migration-kvm-libvirt"/>.</para>
|
||||
<para>Because the Compute service does not use the libvirt live migration functionality by
|
||||
default, guests are suspended before migration and might experience several minutes of
|
||||
downtime. For details, see <xref linkend="true-live-migration-kvm-libvirt"/>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>This guide assumes the default value for
|
||||
<option>instances_path</option> in your
|
||||
<para>This guide assumes the default value for <option>instances_path</option> in your
|
||||
<filename>nova.conf</filename> file
|
||||
(<filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename>).
|
||||
If you have changed the <literal>state_path</literal> or
|
||||
<literal>instances_path</literal> variables, modify
|
||||
accordingly.</para>
|
||||
(<filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename>). If you
|
||||
have changed the <literal>state_path</literal> or <literal>instances_path</literal>
|
||||
variables, modify accordingly.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>You must specify
|
||||
<literal>vncserver_listen=0.0.0.0</literal> or live
|
||||
migration does not work correctly.</para>
|
||||
<para>You must specify <literal>vncserver_listen=0.0.0.0</literal> or live migration does
|
||||
not work correctly.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
<section xml:id="section_example-compute-install">
    <title>Example Compute installation environment</title>
    <itemizedlist>
        <listitem>
            <para>Prepare at least three servers; for example, <literal>HostA</literal>,
                <literal>HostB</literal>, and <literal>HostC</literal>:</para>
            <itemizedlist>
                <listitem>
                    <para><literal>HostA</literal> is the <firstterm baseform="cloud controller">Cloud
                        Controller</firstterm>, and should run these services: <systemitem
                        class="service">nova-api</systemitem>, <systemitem class="service"
                        >nova-scheduler</systemitem>, <literal>nova-network</literal>, <systemitem
                        class="service">cinder-volume</systemitem>, and
                        <literal>nova-objectstore</literal>.</para>
                </listitem>
                <listitem>
                    <para><literal>HostB</literal> and <literal>HostC</literal> are the <firstterm
                        baseform="compute node">compute nodes</firstterm> that run <systemitem
                        class="service">nova-compute</systemitem>.</para>
                </listitem>
            </itemizedlist>
            <para>Ensure that <literal><replaceable>NOVA-INST-DIR</replaceable></literal> (set with
                <literal>state_path</literal> in the <filename>nova.conf</filename> file) is the
                same on all hosts.</para>
        </listitem>
        <listitem>
            <para>In this example, <literal>HostA</literal> is the NFSv4 server that exports the
                <filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename> directory.
                <literal>HostB</literal> and <literal>HostC</literal> are NFSv4 clients that mount
                it.</para>
        </listitem>
    </itemizedlist>
    <procedure>
        <title>To configure your system</title>
        <step>
            <para>Configure your DNS or <filename>/etc/hosts</filename> and ensure it is consistent
                across all hosts. Make sure that the three hosts can perform name resolution with
                each other. As a test, use the <command>ping</command> command to ping each host
                from one another.</para>
            <screen><prompt>$</prompt> <userinput>ping HostA</userinput>
<prompt>$</prompt> <userinput>ping HostB</userinput>
<prompt>$</prompt> <userinput>ping HostC</userinput></screen>
        </step>
        <step>
            <para>Ensure that the UID and GID of your Compute and libvirt users are identical
                between each of your servers. This ensures that the permissions on the NFS mount
                work correctly.</para>
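            <para>For example, you can compare the IDs on each host; the values shown here are
                illustrative only:</para>
            <screen><prompt>$</prompt> <userinput>id nova</userinput>
<computeroutput>uid=112(nova) gid=120(nova) groups=120(nova),119(libvirtd)</computeroutput></screen>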
        </step>
        <step>
            <para>Export <filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename>
                from <literal>HostA</literal>, and have it readable and writable by the Compute
                user on <literal>HostB</literal> and <literal>HostC</literal>.</para>
            <para>For more information, see <link
                xlink:href="https://help.ubuntu.com/community/SettingUpNFSHowTo"
                >SettingUpNFSHowTo</link> or <link
                xlink:href="http://www.cyberciti.biz/faq/centos-fedora-rhel-nfs-v4-configuration/"
                >CentOS / Redhat: Setup NFS v4.0 File Server</link>.</para>
        </step>
        <step>
            <para>Configure the NFS server at <literal>HostA</literal> by adding the following line
                to the <filename>/etc/exports</filename> file:</para>
            <programlisting><replaceable>NOVA-INST-DIR</replaceable>/instances HostA/255.255.0.0(rw,sync,fsid=0,no_root_squash)</programlisting>
            <para>Change the subnet mask (<literal>255.255.0.0</literal>) to the appropriate value
                to include the IP addresses of <literal>HostB</literal> and
                <literal>HostC</literal>. Then restart the NFS server:</para>
            <screen><prompt>#</prompt> <userinput>/etc/init.d/nfs-kernel-server restart</userinput>
<prompt>#</prompt> <userinput>/etc/init.d/idmapd restart</userinput></screen>
        </step>
        <step>
            <para>Set the 'execute/search' bit on your shared directory.</para>
            <para>On both compute nodes, make sure to enable the 'execute/search' bit to allow qemu
                to use the images within the directories. On all hosts, run the following
                command:</para>
            <screen><prompt>$</prompt> <userinput>chmod o+x <replaceable>NOVA-INST-DIR</replaceable>/instances</userinput></screen>
        </step>
        <step>
            <para>Configure NFS at HostB and HostC by adding the following line to the
                <filename>/etc/fstab</filename> file:</para>
            <programlisting>HostA:/ /<replaceable>NOVA-INST-DIR</replaceable>/instances nfs4 defaults 0 0</programlisting>
            <para>Ensure that the exported directory can be mounted:</para>
            <screen><prompt>$</prompt> <userinput>mount -a -v</userinput></screen>
            <para>Check that HostA can see the
                "<filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename>"
                directory:</para>
            <screen><prompt>$</prompt> <userinput>ls -ld <filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename></userinput></screen>
            <screen><computeroutput>drwxr-xr-x 2 nova nova 4096 2012-05-19 14:34 nova-install-dir/instances/</computeroutput></screen>
            <para>Perform the same check at HostB and HostC, paying special attention to the
                permissions (Compute should be able to write):</para>
            <screen><prompt>$</prompt> <userinput>ls -ld <filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename></userinput></screen>
            <screen><computeroutput>drwxr-xr-x 2 nova nova 4096 2012-05-07 14:34 nova-install-dir/instances/</computeroutput></screen>
            <screen><prompt>$</prompt> <userinput>df -k</userinput></screen>
            <screen><computeroutput>Filesystem           1K-blocks      Used Available Use% Mounted on
/dev/sda1            921514972   4180880 870523828   1% /
none                  16498340      1228  16497112   1% /dev
none                  16502856         0  16502856   0% /dev/shm
none                  16502856       368  16502488   1% /var/run
none                  16502856         0  16502856   0% /var/lock
none                  16502856         0  16502856   0% /lib/init/rw
HostA:               921515008 101921792 772783104  12% /var/lib/nova/instances  ( <--- this line is important.)</computeroutput></screen>
        </step>
        <step>
            <para>Update the libvirt configurations so that the calls can be made securely. These
                methods enable remote access over TCP and are not documented here. Consult your
                network administrator for assistance in deciding how to configure access.</para>
            <itemizedlist>
                <listitem>
                    <para>SSH tunnel to libvirtd's UNIX socket</para>
                </listitem>
                <listitem>
                    <para>libvirtd TCP socket, with GSSAPI/Kerberos for auth+data encryption</para>
                </listitem>
                <listitem>
                    <para>libvirtd TCP socket, with TLS for encryption and x509 client certs for
                        authentication</para>
                </listitem>
                <listitem>
                    <para>libvirtd TCP socket, with TLS for encryption and Kerberos for
                        authentication</para>
                </listitem>
            </itemizedlist>
            <para>Restart libvirt. After you run the command, ensure that libvirt is successfully
                restarted:</para>
            <screen><prompt>#</prompt> <userinput>stop libvirt-bin && start libvirt-bin</userinput>
<prompt>$</prompt> <userinput>ps -ef | grep libvirt</userinput>
<computeroutput>root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l</computeroutput></screen>
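            <para>Purely as an illustration of the kind of change involved, and not as a
                recommended configuration, a TCP listener with SASL authentication could be enabled
                in <filename>/etc/libvirt/libvirtd.conf</filename> as follows:</para>
            <programlisting># illustrative settings only; choose the access method that fits your site
listen_tls = 0
listen_tcp = 1
auth_tcp = "sasl"</programlisting>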
        </step>
        <step>
            <para>Configure your firewall to allow libvirt to communicate between nodes.</para>
            <para>By default, libvirt listens on TCP port 16509, and an ephemeral TCP range from
                49152 to 49261 is used for the KVM communications. Based on the secure remote
                access TCP configuration you chose, be careful choosing which ports you open, and
                understand who has access. For information about ports that are used with libvirt,
                see <link
                xlink:href="http://libvirt.org/remote.html#Remote_libvirtd_configuration">the
                libvirt documentation</link>.</para>
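            <para>As a sketch, assuming your compute hosts share the 192.0.2.0/24 subnet
                (substitute your own network), rules such as these would open the required
                ports:</para>
            <screen><prompt>#</prompt> <userinput>iptables -A INPUT -p tcp -s 192.0.2.0/24 --dport 16509 -j ACCEPT</userinput>
<prompt>#</prompt> <userinput>iptables -A INPUT -p tcp -s 192.0.2.0/24 --dport 49152:49261 -j ACCEPT</userinput></screen>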
        </step>
        <step>
            <para>You can now configure options for live migration. In most cases, you do not need
                to configure any options. The following chart is for advanced usage only.</para>
        </step>
    </procedure>
    <xi:include href="../../common/tables/nova-livemigration.xml"/>
</section>
<section xml:id="true-live-migration-kvm-libvirt">
    <title>Enable true live migration</title>
    <para>By default, the Compute service does not use the libvirt live migration functionality.
        To enable this functionality, add the following line to the <filename>nova.conf</filename>
        file:</para>
    <programlisting>live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE</programlisting>
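    <para>With this flag set, a live migration is requested in the usual way; for example, where
        <literal>vm1</literal> and <literal>HostC</literal> are an illustrative instance and
        destination host:</para>
    <screen><prompt>$</prompt> <userinput>nova live-migration vm1 HostC</userinput></screen>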
    <para>The Compute service does not use libvirt's live migration by default because there is a
        risk that the migration process never ends. This can happen if the guest operating system
        dirties blocks on the disk faster than they can be migrated.</para>
</section>
</section>
<!--status: good, right place-->
<itemizedlist>
    <title>Prerequisites</title>
    <listitem>
        <para><emphasis role="bold">Compatible XenServer hypervisors</emphasis>. For more
            information, see the <link
            xlink:href="http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/reference.html#pooling_homogeneity_requirements"
            >Requirements for Creating Resource Pools</link> section of the <citetitle>XenServer
            Administrator's Guide</citetitle>.</para>
    </listitem>
    <listitem>
        <para><emphasis role="bold">Shared storage</emphasis>. An NFS export, visible to all
            XenServer hosts.</para>
        <note>
            <para>For the supported NFS versions, see the <link
                xlink:href="http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/reference.html#id1002701"
                >NFS VHD</link> section of the <citetitle>XenServer Administrator's
                Guide</citetitle>.</para>
        </note>
    </listitem>
</itemizedlist>
<para>To use shared storage live migration with XenServer hypervisors, the hosts must be
    joined to a XenServer pool. To create that pool, a host aggregate must be created with
    special metadata. This metadata is used by the XAPI plug-ins to establish the pool.</para>
<procedure>
    <title>To use shared storage live migration with XenServer hypervisors</title>
    <step>
        <para>Add an NFS VHD storage to your master XenServer, and set it as the default SR
            (storage repository). For more information, see NFS VHD in the <citetitle>XenServer
            Administrator's Guide</citetitle>.</para>
    </step>
    <step>
        <para>Configure all compute nodes to use the default <literal>sr</literal> for pool
            operations. Add this line to your <filename>nova.conf</filename> configuration files
            across your compute nodes:</para>
        <programlisting>sr_matching_filter=default-sr:true</programlisting>
    </step>
    <step>
        <para>Create a host aggregate:</para>
        <screen><prompt>$</prompt> <userinput>nova aggregate-create <replaceable>POOL_NAME</replaceable> <replaceable>AVAILABILITY_ZONE</replaceable></userinput></screen>
        <para>The command displays a table that contains the ID of the newly created
            aggregate.</para>
        <para>Now add special metadata to the aggregate, to mark it as a hypervisor pool:</para>
        <screen><prompt>$</prompt> <userinput>nova aggregate-set-metadata <replaceable>AGGREGATE_ID</replaceable> hypervisor_pool=true</userinput></screen>
        <screen><prompt>$</prompt> <userinput>nova aggregate-set-metadata <replaceable>AGGREGATE_ID</replaceable> operational_state=created</userinput></screen>
        <para>Make the first compute node part of that aggregate:</para>
        <screen><prompt>$</prompt> <userinput>nova aggregate-add-host <replaceable>AGGREGATE_ID</replaceable> <replaceable>MASTER_COMPUTE_NAME</replaceable></userinput></screen>
        <para>The host is now part of a XenServer pool.</para>
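        <para>For example, with a hypothetical pool named <literal>xenpool</literal>, the
            <literal>nova</literal> availability zone, and an aggregate ID of
            <literal>1</literal>, the whole sequence might look like this:</para>
        <screen><prompt>$</prompt> <userinput>nova aggregate-create xenpool nova</userinput>
<prompt>$</prompt> <userinput>nova aggregate-set-metadata 1 hypervisor_pool=true</userinput>
<prompt>$</prompt> <userinput>nova aggregate-set-metadata 1 operational_state=created</userinput>
<prompt>$</prompt> <userinput>nova aggregate-add-host 1 compute1</userinput></screen>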
    </step>
    <step>
        <para>Add hosts to the pool:</para>
        <screen><prompt>$</prompt> <userinput>nova aggregate-add-host <replaceable>AGGREGATE_ID</replaceable> <replaceable>COMPUTE_HOST_NAME</replaceable></userinput></screen>
        <note>
            <para>The added compute node and the host are shut down at this point, to join the
                host to the XenServer pool. The operation fails if any server other than the
                compute node is running or suspended on your host.</para>
        </note>
    </step>
</procedure>
<itemizedlist>
    <title>Prerequisites</title>
    <listitem>
        <para><emphasis role="bold">Compatible XenServer hypervisors</emphasis>. The hypervisors
            must support the Storage XenMotion feature. See your XenServer manual to make sure
            your edition has this feature.</para>
    </listitem>
</itemizedlist>
<note>
    <itemizedlist>
        <listitem>
            <para>To use block migration, you must use the
                <parameter>--block-migrate</parameter> parameter with the live migration
                command.</para>
        </listitem>
        <listitem>
            <para>Block migration works only with EXT local storage SRs, and the server must not
                have any volumes attached.</para>
        </listitem>
    </itemizedlist>
</note>
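<para>For example, a block live migration of a hypothetical instance <literal>vm1</literal> to
    host <literal>HostC</literal> would be requested as follows:</para>
<screen><prompt>$</prompt> <userinput>nova live-migration --block-migrate vm1 HostC</userinput></screen>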
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
    xml:id="section_compute-images-and-instances">
    <title>Images and instances</title>
    <para>Disk images provide templates for virtual machine file systems. The Image Service
        handles the storage and management of images.</para>
    <para>Instances are the individual virtual machines that run on physical compute nodes. Users
        can launch any number of instances from the same image. Each launched instance runs from a
        copy of the base image so that any changes made to the instance do not affect the base
        image. You can take snapshots of running instances to create an image based on the current
        disk state of a particular instance. The Compute service manages instances.</para>
    <para>When you launch an instance, you must choose a <literal>flavor</literal>, which
        represents a set of virtual resources. Flavors define how many virtual CPUs an instance
        has and the amount of RAM and size of its ephemeral disks. OpenStack provides a number of
        predefined flavors that you can edit or add to. Users must select from the set of
        available flavors defined on their cloud.</para>
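    <para>For example, you can review the flavors defined on your cloud; the output below is
        abbreviated and illustrative:</para>
    <screen><prompt>$</prompt> <userinput>nova flavor-list</userinput>
<computeroutput>+----+----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name     | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+----------+-----------+------+-----------+------+-------+-------------+-----------+
| 1  | m1.tiny  | 512       | 1    | 0         |      | 1     | 1.0         | True      |
| 2  | m1.small | 2048      | 20   | 0         |      | 1     | 1.0         | True      |
+----+----------+-----------+------+-----------+------+-------+-------------+-----------+</computeroutput></screen>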
    <note>
        <itemizedlist>
            <listitem>
                <para>For more information about creating and troubleshooting images, see the <link
                    xlink:href="http://docs.openstack.org/image-guide/content/"
                    ><citetitle>OpenStack Virtual Machine Image Guide</citetitle></link>.</para>
            </listitem>
            <listitem>
                <para>For more information about image configuration options, see the <link
                    xlink:href="http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-image-service.html"
                    >Image Services</link> section of the <citetitle>OpenStack Configuration
                    Reference</citetitle>.</para>
            </listitem>
            <listitem>
                <para>For more information about flavors, see <xref linkend="customize-flavors"/>
                    or <link
                    xlink:href="http://docs.openstack.org/openstack-ops/content/flavors.html"
                    >Flavors</link> in the <citetitle>OpenStack Operations
                    Guide</citetitle>.</para>
            </listitem>
        </itemizedlist>
    </note>
    <para>You can add and remove additional resources from running instances, such as persistent
        volume storage, or public IP addresses. The example used in this chapter is of a typical
        virtual system within an OpenStack cloud. It uses the <systemitem class="service"
        >cinder-volume</systemitem> service, which provides persistent block storage, instead of
        the ephemeral storage provided by the selected instance flavor.</para>
    <para>This diagram shows the system state prior to launching an instance. The image store,
        fronted by the Image service (glance), has a number of predefined images. Inside the
        cloud, a compute node contains the available vCPU, memory, and local disk resources.
        Additionally, the <systemitem class="service">cinder-volume</systemitem> service provides
        a number of predefined volumes.</para>
<figure xml:id="initial-instance-state-figure">
|
||||
<title>Base image state with no running instances</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/instance-life-1.png"
|
||||
/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<para>To launch an instance, select an image, a flavor, and
|
||||
other optional attributes. The selected flavor provides a
|
||||
root volume, labeled <literal>vda</literal> in this
|
||||
diagram, and additional ephemeral storage, labeled
|
||||
<literal>vdb</literal>. In this example, the
|
||||
<systemitem class="service">cinder-volume</systemitem>
|
||||
store is mapped to the third virtual disk on this
|
||||
instance, <literal>vdc</literal>.</para>
|
||||
<figure xml:id="run-instance-state-figure">
|
||||
<title>Instance creation from image and runtime
|
||||
state</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/instance-life-2.png"
|
||||
/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<para>The base image is copied from the image store to the
|
||||
local disk. The local disk is the first disk that the
|
||||
instance accesses, and is labeled <literal>vda</literal>.
|
||||
By using smaller images, your instances start up faster as
|
||||
less data needs to be copied across the network.</para>
|
||||
<para>A new empty disk, labeled <literal>vdb</literal> is also
|
||||
created. This is an empty ephemeral disk, which is
|
||||
destroyed when you delete the instance.</para>
|
||||
<para>The compute node is attached to the <systemitem
|
||||
class="service">cinder-volume</systemitem> using
|
||||
iSCSI, and maps to the third disk, <literal>vdc</literal>.
|
||||
The vCPU and memory resources are provisioned and the
|
||||
instance is booted from <literal>vda</literal>. The
|
||||
instance runs and changes data on the disks as indicated
|
||||
in red in the diagram.
|
||||
<!--This isn't very accessible, need to consider rewording to explain more fully. LKB -->
|
||||
</para>
|
||||
<note>
|
||||
<para>Some of the details in this example scenario might be different in your
|
||||
environment. For example, you might use a different type of back-end storage or
|
||||
different network protocols. One common variant is that the ephemeral storage used for
|
||||
volumes <literal>vda</literal> and <literal>vdb</literal> could be backed by network
|
||||
storage rather than a local disk.</para>
|
||||
</note>
|
||||
<para>When the instance is deleted, the state is reclaimed with the exception of the
|
||||
persistent volume. The ephemeral storage is purged; memory and vCPU resources are released.
|
||||
The image remains unchanged throughout.</para>
|
||||
<figure xml:id="end-instance-state-figure">
|
||||
<title>End state of image and volume after instance
|
||||
exits</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/instance-life-3.png"
|
||||
/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<xi:include href="section_compute-image-mgt.xml"/>
|
||||
<xi:include href="../image/section_glance-property-protection.xml"/>
|
||||
<xi:include href="section_compute-instance-building-blocks.xml"/>
|
||||
<xi:include href="section_compute-instance-mgt-tools.xml"/>
|
||||
<section xml:id="section_instance-scheduling-constraints">
|
||||
<title>Control where instances run</title>
|
||||
<para>The <link
|
||||
xlink:href="http://docs.openstack.org/trunk/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link>
|
||||
provides detailed information on controlling where your
|
||||
instances run, including ensuring a set of instances run
|
||||
on different compute nodes for service resiliency or on
|
||||
the same node for high performance inter-instance
|
||||
communications.</para>
|
||||
<para>Admin users can specify an exact compute node to run on
|
||||
using the command <command>--availability-zone
|
||||
<replaceable>availability-zone</replaceable>:<replaceable>compute-host</replaceable></command>
|
||||
</para>
|
||||
</section>
|
||||
<figure xml:id="initial-instance-state-figure">
|
||||
<title>Base image state with no running instances</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="../../common/figures/instance-life-1.png"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<para>To launch an instance, select an image, a flavor, and other optional attributes. The
|
||||
selected flavor provides a root volume, labeled <literal>vda</literal> in this diagram, and
|
||||
additional ephemeral storage, labeled <literal>vdb</literal>. In this example, the
|
||||
<systemitem class="service">cinder-volume</systemitem> store is mapped to the third
|
||||
virtual disk on this instance, <literal>vdc</literal>.</para>
|
||||
<figure xml:id="run-instance-state-figure">
|
||||
<title>Instance creation from image and runtime state</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="../../common/figures/instance-life-2.png"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<para>The base image is copied from the image store to the local disk. The local disk is the
|
||||
first disk that the instance accesses, and is labeled <literal>vda</literal>. By using
|
||||
smaller images, your instances start up faster as less data needs to be copied across the
|
||||
network.</para>
|
||||
<para>A new empty disk, labeled <literal>vdb</literal> is also created. This is an empty
|
||||
ephemeral disk, which is destroyed when you delete the instance.</para>
|
||||
<para>The compute node is attached to the <systemitem class="service">cinder-volume</systemitem>
|
||||
using iSCSI, and maps to the third disk, <literal>vdc</literal>. The vCPU and memory
|
||||
resources are provisioned and the instance is booted from <literal>vda</literal>. The
|
||||
instance runs and changes data on the disks as indicated in red in the diagram.
|
||||
<!--This isn't very accessible, need to consider rewording to explain more fully. LKB --></para>
|
||||
    <note>
        <para>Some details in this example scenario might be different in your environment. For
            example, you might use a different type of back-end storage or different network
            protocols. One common variant is that the ephemeral storage used for volumes
            <literal>vda</literal> and <literal>vdb</literal> could be backed by network storage
            rather than a local disk.</para>
    </note>
    <para>When the instance is deleted, the state is reclaimed with the exception of the
        persistent volume. The ephemeral storage is purged; memory and vCPU resources are
        released. The image remains unchanged throughout.</para>
    <figure xml:id="end-instance-state-figure">
        <title>End state of image and volume after instance exits</title>
        <mediaobject>
            <imageobject>
                <imagedata fileref="../../common/figures/instance-life-3.png"/>
            </imageobject>
        </mediaobject>
    </figure>
    <xi:include href="section_compute-image-mgt.xml"/>
    <xi:include href="../image/section_glance-property-protection.xml"/>
    <xi:include href="section_compute-instance-building-blocks.xml"/>
    <xi:include href="section_compute-instance-mgt-tools.xml"/>
<section xml:id="section_instance-scheduling-constraints">
|
||||
<title>Control where instances run</title>
|
||||
<para>The <link xlink:href="http://docs.openstack.org/trunk/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link> provides detailed
|
||||
information on controlling where your instances run, including ensuring a set of
|
||||
instances run on different compute nodes for service resiliency or on the same node for
|
||||
high performance inter-instance communications.</para>
|
||||
<para>Administrative users can specify on which compute node to run instances. To do so,
|
||||
specify the <parameter>--availability-zone
|
||||
<replaceable>AVAILABILITY_ZONE</replaceable>:<replaceable>COMPUTE_HOST</replaceable></parameter>
|
||||
parameter.</para>
|
||||
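        <para>For example, to place an instance on a hypothetical host <literal>HostB</literal> in
            the <literal>nova</literal> availability zone (image and flavor names are
            illustrative):</para>
        <screen><prompt>$</prompt> <userinput>nova boot --image cirros --flavor m1.tiny --availability-zone nova:HostB vm1</userinput></screen>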
    </section>
</section>
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_nova-compute-node-down">
    <title>Recover from a failed compute node</title>
    <para>If you deployed Compute with a shared file system, you can quickly recover from a failed
        compute node. Of the two methods covered in these sections, evacuating is the preferred
        method even in the absence of shared storage. Evacuating provides many benefits over
        manual recovery, such as re-attachment of volumes and floating IPs.</para>
<xi:include href="../../common/section_cli_nova_evacuate.xml"/>
|
||||
<section xml:id="nova-compute-node-down-manual-recovery">
|
||||
<title>Manual recovery</title>
|
||||
<para>To recover a KVM/libvirt compute node, see the previous section. Use the
|
||||
following procedure for all other hypervisors.</para>
|
||||
<procedure>
|
||||
<title>Review host information</title>
|
||||
<step>
|
||||
<para>Identify the VMs on the affected hosts, using tools such as a
|
||||
combination of <literal>nova list</literal> and <literal>nova show</literal> or
|
||||
<xi:include href="../../common/section_cli_nova_evacuate.xml"/>
|
||||
<section xml:id="nova-compute-node-down-manual-recovery">
|
||||
<title>Manual recovery</title>
|
||||
<para>To recover a KVM/libvirt compute node, see the previous section. Use the following
|
||||
procedure for all other hypervisors.</para>
|
||||
<procedure>
|
||||
<title>Review host information</title>
|
||||
<step>
|
||||
                <para>Identify the VMs on the affected hosts, using tools such as a combination of
                    <literal>nova list</literal> and <literal>nova show</literal> or
                    <literal>euca-describe-instances</literal>. For example, the following output
                    displays information about instance <systemitem>i-000015b9</systemitem> that
                    is running on node <systemitem>np-rcc54</systemitem>:</para>
                <screen><prompt>$</prompt> <userinput>euca-describe-instances</userinput>
<computeroutput>i-000015b9 at3-ui02 running nectarkey (376, np-rcc54) 0 m1.xxlarge 2012-06-19T00:48:11.000Z 115.146.93.60</computeroutput></screen>
            </step>
            <step>
                <para>Review the status of the host by querying the Compute database. Some of the
                    important information is highlighted below. The following example converts an
                    EC2 API instance ID into an OpenStack ID:</para>
                <screen><computeroutput>...
uuid: 3f57699a-e773-4650-a443-b4b37eed5a06
...
task_state: NULL
...</computeroutput></screen>
            </step>
        </procedure>
        <procedure>
            <title>Recover the VM</title>
            <step>
                <para>After you have determined the status of the VM on the failed host, decide to
                    which compute host the affected VM should be moved. For example, run the
                    following database command to move the VM to
                    <systemitem>np-rcc46</systemitem>:</para>
                <screen><prompt>mysql></prompt> <userinput>UPDATE instances SET host = 'np-rcc46' WHERE uuid = '3f57699a-e773-4650-a443-b4b37eed5a06';</userinput></screen>
            </step>
            <step>
                <para>If using a hypervisor that relies on libvirt (such as KVM), it is a good
                    idea to update the <literal>libvirt.xml</literal> file (found in
                    <literal>/var/lib/nova/instances/[instance ID]</literal>). The important
                    changes to make are:</para>
                <itemizedlist>
                    <listitem>
                        <para>Change the <literal>DHCPSERVER</literal> value to the host IP
                            address of the compute host that is now the VM's new home.</para>
                    </listitem>
                    <listitem>
                        <para>Update the VNC IP, if it isn't already updated, to
                            <literal>0.0.0.0</literal>.</para>
                    </listitem>
                </itemizedlist>
            </step>
            <step>
                <para>Reboot the VM:</para>
                <screen><prompt>$</prompt> <userinput>nova reboot --hard 3f57699a-e773-4650-a443-b4b37eed5a06</userinput></screen>
            </step>
        </procedure>
        <para>In theory, the above database update and <literal>nova reboot</literal> command are
            all that is required to recover a VM from a failed host. However, if further problems
            occur, consider looking at recreating the network filter configuration using
            <literal>virsh</literal>, restarting the Compute services, or updating the
            <literal>vm_state</literal> and <literal>power_state</literal> in the Compute
            database.</para>
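        <para>For example, you can check the network filters present on the new host with
            <literal>virsh</literal>; the output shown is illustrative only:</para>
        <screen><prompt>#</prompt> <userinput>virsh nwfilter-list</userinput>
<computeroutput>UUID                                  Name
----------------------------------------------------------------
4bbbd517-acbc-4b77-a18f-53c3dbcd2725  nova-base
...</computeroutput></screen>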
    </section>
<section xml:id="section_nova-uid-mismatch">
|
||||
<title>Recover from a UID/GID mismatch</title>
|
||||
<para>When running OpenStack Compute, using a shared file system or an automated
|
||||
configuration tool, you could encounter a situation where some files on your compute
|
||||
node are using the wrong UID or GID. This causes a number of errors, such as being
|
||||
unable to do live migration or start virtual machines.</para>
|
||||
<para>The following procedure runs on <systemitem class="service"
|
||||
>nova-compute</systemitem> hosts, based on the KVM hypervisor, and could help to
|
||||
restore the situation:</para>
|
||||
<procedure>
|
||||
<title>To recover from a UID/GID mismatch</title>
|
||||
<step>
|
||||
<para>Ensure you do not use numbers that are already used for some other
|
||||
<para>The following procedure runs on <systemitem class="service">nova-compute</systemitem>
|
||||
hosts, based on the KVM hypervisor, and could help to restore the situation:</para>
|
||||
<procedure>
|
||||
<title>To recover from a UID/GID mismatch</title>
|
||||
            <step>
                <para>Ensure you do not use numbers that are already used for some other
                    user/group.</para>
            </step>
            <step>
                <para>Set the nova UID in <filename>/etc/passwd</filename> to the same number on
                    all hosts (for example, 112).</para>
            </step>
            <step>
                <para>Set the libvirt-qemu UID in <filename>/etc/passwd</filename> to the same
                    number on all hosts (for example, 119).</para>
            </step>
            <step>
                <para>Set the nova group in the <filename>/etc/group</filename> file to the same
                    number on all hosts (for example, 120).</para>
            </step>
            <step>
                <para>Set the libvirtd group in the <filename>/etc/group</filename> file to the
                    same number on all hosts (for example, 119).</para>
            </step>
            <step>
                <para>Stop the services on the compute node.</para>
            </step>
            <step>
                <para>Change all the files owned by user <systemitem>nova</systemitem> or by group
                    <systemitem>nova</systemitem>. For example:</para>
                <screen><prompt>#</prompt> <userinput>find / -uid 108 -exec chown nova {} \;</userinput> # note the 108 here is the old nova UID before the change
<prompt>#</prompt> <userinput>find / -gid 120 -exec chgrp nova {} \;</userinput></screen>
            </step>
            <step>
                <para>Repeat the steps for the libvirt-qemu owned files if those needed to
                    change.</para>
            </step>
            <step>
                <para>Restart the services.</para>
            </step>
            <step>
                <para>Now you can run the <command>find</command> command to verify that all files
                    are using the correct identifiers.</para>
            </step>
        </procedure>
    </section>
<section xml:id="section_nova-disaster-recovery-process">
|
||||
<title>Recover cloud after disaster</title>
|
||||
<para>Use the following procedures to manage your cloud after a disaster, and to easily back
|
||||
up its persistent storage volumes. Backups <emphasis role="bold">are</emphasis>
|
||||
mandatory, even outside of disaster scenarios.</para>
|
||||
<para>For a DRP definition, see <link
|
||||
<para>For a DRP definition, see <link
|
||||
xlink:href="http://en.wikipedia.org/wiki/Disaster_Recovery_Plan"
|
||||
>http://en.wikipedia.org/wiki/Disaster_Recovery_Plan</link>.</para>
|
||||
        <simplesect>
            <title>Disaster recovery example</title>
            <para>A disaster could happen to several components of your architecture (for example,
                a disk crash, a network loss, or a power cut). In this example, the following
                components are configured:</para>
            <orderedlist>
                <listitem>
                    <para>A cloud controller (<systemitem>nova-api</systemitem>,
                        <systemitem>nova-objectstore</systemitem>,
                        <systemitem>nova-network</systemitem>)</para>
                </listitem>
                <listitem>
                    <para>A compute node (<systemitem class="service"
                        >nova-compute</systemitem>)</para>
                </listitem>
                <listitem>
                    <para>A Storage Area Network (SAN) used by OpenStack Block Storage (<systemitem
                        class="service">cinder-volumes</systemitem>)</para>
                </listitem>
            </orderedlist>
            <para>The worst disaster for a cloud is a power loss, which applies to all three
                components. Before a power loss:</para>
            <itemizedlist>
                <listitem>
                    <para>From the SAN to the cloud controller, we have an active iSCSI session
                        (used for the <literal>cinder-volumes</literal> LVM volume group).</para>
                </listitem>
                <listitem>
                    <para>From the cloud controller to the compute node, we also have active iSCSI
                        sessions (managed by <systemitem class="service"
                        >cinder-volume</systemitem>).</para>
                </listitem>
                <listitem>
                    <para>For every volume, an iSCSI session is made (so 14 EBS volumes equals 14
                        sessions).</para>
                </listitem>
                <listitem>
                    <para>From the cloud controller to the compute node, we also have
                        iptables/ebtables rules, which allow access from the cloud controller to
                        the running instance.</para>
                </listitem>
                <listitem>
                    <para>Finally, from the cloud controller to the compute node, the current
                        state of the instances (in this case, "running") and their volume
                        attachments (mount point, volume ID, volume status, and so on) are saved
                        in the database.</para>
                </listitem>
            </itemizedlist>
            <para>After the power loss occurs and all hardware components restart:</para>
            <itemizedlist>
                <listitem>
                    <para>From the SAN to the cloud, the iSCSI session no longer exists.</para>
                </listitem>
                <listitem>
                    <para>From the cloud controller to the compute node, the iSCSI sessions no
                        longer exist.</para>
                </listitem>
                <listitem>
                    <para>From the cloud controller to the compute node, the iptables and ebtables
                        are recreated, since at boot, <systemitem>nova-network</systemitem>
                        reapplies configurations.</para>
                </listitem>
                <listitem>
                    <para>From the cloud controller, instances are in a shutdown state (because
                        they are no longer running).</para>
                </listitem>
                <listitem>
                    <para>In the database, data was not updated at all, since Compute could not
                        have anticipated the crash.</para>
                </listitem>
            </itemizedlist>
            <para>Before going further, and to prevent the administrator from making fatal
                mistakes, <emphasis role="bold">instances won't be lost</emphasis>, because no
                "<command>destroy</command>" or "<command>terminate</command>" command was
                invoked, so the files for the instances remain on the compute node.</para>
            <para>Perform these tasks in the following order.</para>
            <warning>
                <para>Do not add any extra steps at this stage.</para>
            </warning>
            <orderedlist>
                <listitem>
                    <para>Get the current relation from a volume to its instance, so that you can
                        recreate the attachment.</para>
                </listitem>
                <listitem>
                    <para>Update the database to clean the stalled state. (After that, you cannot
                        perform the first step).</para>
                </listitem>
                <listitem>
                    <para>Restart the instances. In other words, go from a shutdown to a running
                        state.</para>
                </listitem>
                <listitem>
                    <para>After the restart, reattach the volumes to their respective instances
                        (optional).</para>
                </listitem>
                <listitem>
                    <para>SSH into the instances to reboot them.</para>
                </listitem>
            </orderedlist>
        </simplesect>
<simplesect>
|
||||
<title>Recover after a disaster</title>
|
||||
<procedure>
|
||||
<title>To perform disaster recovery</title>
|
||||
<step>
|
||||
<title>Get the instance-to-volume relationship</title>
|
||||
<para>You must determine the current relationship from a volume to its instance,
|
||||
because you will re-create the attachment.</para>
|
||||
<para>You can find this relationship by running <command>nova
|
||||
volume-list</command>. Note that the <command>nova</command> client
|
||||
includes the ability to get volume information from OpenStack Block
|
||||
Storage.</para>
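            <para>For example (the exact column names can vary by client version),
                run the command and record the volume ID, the instance shown in the
                <literal>Attached to</literal> column, and the mount point for each
                volume:</para>
            <screen><prompt>$</prompt> <userinput>nova volume-list</userinput></screen>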
        </step>
        <step>
            <title>Update the database</title>
            <para>Update the database to clean the stalled state. You must restore for every
                volume, using these queries to clean up the database:</para>
            <screen><prompt>mysql></prompt> <userinput>use cinder;</userinput>
<prompt>mysql></prompt> <userinput>update volumes set mountpoint=NULL;</userinput>
<prompt>mysql></prompt> <userinput>update volumes set status="available" where status <>"error_deleting";</userinput>
<prompt>mysql></prompt> <userinput>update volumes set attach_status="detached";</userinput>
<prompt>mysql></prompt> <userinput>update volumes set instance_id=0;</userinput></screen>
            <para>You can then run <command>nova volume-list</command> commands to list all
                volumes.</para>
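            <para>If you want to confirm the cleanup inside the database first, a simple
                query against the same table and columns used above shows the reset
                rows:</para>
            <screen><prompt>mysql></prompt> <userinput>select id, status, attach_status from volumes;</userinput></screen>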
        </step>
        <step>
            <title>Restart instances</title>
            <para>Restart the instances using the <command>nova reboot
                <replaceable>INSTANCE</replaceable></command> command.</para>
            <para>At this stage, depending on your image, some instances completely reboot
                and become reachable, while others stop on the "plymouth" stage.</para>
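            <para>If every instance must be restarted, a short loop over the IDs reported
                by <command>nova list</command> can help; the <command>awk</command>
                filter shown is only a sketch and assumes the default table
                output:</para>
            <screen><prompt>$</prompt> <userinput>for id in $(nova list | awk '/ACTIVE|SHUTOFF/ {print $2}'); do nova reboot $id; done</userinput></screen>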
        </step>
        <step>
            <title>DO NOT reboot a second time</title>
            <para>Do not reboot instances that are stopped at this point. Instance state
                depends on whether you added an <filename>/etc/fstab</filename> entry for
                that volume. Images built with the <package>cloud-init</package> package
                remain in a pending state, while others skip the missing volume and start.
@@ -310,14 +290,14 @@
                <package>cloud-init</package>, see <link
                xlink:href="https://help.ubuntu.com/community/CloudInit"
                >help.ubuntu.com/community/CloudInit</link>.</para>
        </step>
        <step>
            <title>Reattach volumes</title>
            <para>After the restart, and Compute has restored the right status, you can
                reattach the volumes to their respective instances using the <command>nova
                volume-attach</command> command. The following snippet uses a file of
                listed volumes to reattach them:</para>
            <programlisting language="bash">#!/bin/bash

# File of "volume instance mount_point" triples, one per line (example path).
volumes_tmp_file=/root/volumes.txt
CUT=cut

while read line; do
    volume=`echo $line | $CUT -f 1 -d " "`
    # Extract the instance and mount point fields (fields 2 and 3).
    instance=`echo $line | $CUT -f 2 -d " "`
    mount_point=`echo $line | $CUT -f 3 -d " "`
    nova volume-attach $instance $volume $mount_point
    sleep 2
done < $volumes_tmp_file</programlisting>
            <para>At this stage, instances that were pending on the boot sequence
                (<application>plymouth</application>) automatically continue their boot,
                and restart normally, while the ones that booted see the volume.</para>
        </step>
        <step>
            <title>SSH into instances</title>
            <para>If some services depend on the volume, or if a volume has an entry in
                <systemitem>fstab</systemitem>, you should now simply restart the
                instance. This restart needs to be made from the instance itself, not
                through <command>nova</command>.</para>
            <para>SSH into the instance and perform a reboot:</para>
            <screen><prompt>#</prompt> <userinput>shutdown -r now</userinput></screen>
        </step>
    </procedure>
    <para>By completing this procedure, you can successfully recover your cloud.</para>
    <note>
        <para>Follow these guidelines:</para>
        <itemizedlist>
@@ -376,30 +355,41 @@
            </listitem>
        </itemizedlist>
    </note>
</simplesect>
<simplesect>
    <title>Script the DRP</title>
    <para>You can download a bash script from <link
        xlink:href="https://github.com/Razique/BashStuff/blob/master/SYSTEMS/OpenStack/SCR_5006_V00_NUAC-OPENSTACK-DRP-OpenStack.sh"
        >here</link> that performs the following steps:</para>
    <orderedlist>
        <listitem>
            <para>An array is created for instances and their attached volumes.</para>
        </listitem>
        <listitem>
            <para>The MySQL database is updated.</para>
        </listitem>
        <listitem>
            <para>Using <systemitem>euca2ools</systemitem>, all instances are
                restarted.</para>
        </listitem>
        <listitem>
            <para>The volume attachment is made.</para>
        </listitem>
        <listitem>
            <para>An SSH connection is performed into every instance using Compute
                credentials.</para>
        </listitem>
    </orderedlist>
    <para>The script's "test mode" allows you to perform the whole sequence for only one
        instance.</para>
    <para>To reproduce the power loss, connect to the compute node which runs that same
        instance and close the iSCSI session. Do not detach the volume using the
        <command>nova volume-detach</command> command; instead, manually close the iSCSI
        session. The following example command uses iSCSI session number 15:</para>
    <screen><prompt>#</prompt> <userinput>iscsiadm -m session -u -r 15</userinput></screen>
    <para>Do not forget the <literal>-r</literal> flag. Otherwise, you close ALL
        sessions.</para>
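    <para>If you are unsure of the session number, list the active sessions first; the
        session number appears in square brackets in each line of the output:</para>
    <screen><prompt>#</prompt> <userinput>iscsiadm -m session</userinput></screen>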
</simplesect>
</section>
</section>

<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section-compute-security">
    <title>Security hardening</title>
    <para>You can integrate OpenStack Compute with various third-party technologies to increase
        security. For more information, see the <link xlink:href="http://docs.openstack.org/sec/"
        ><citetitle>OpenStack Security Guide</citetitle></link>.</para>
    <xi:include href="../../common/section_trusted-compute-pools.xml"/>
    <section xml:id="section_compute_metadata_https">
        <title>Encrypt Compute metadata traffic</title>
        <para>OpenStack Juno supports encrypting Compute metadata traffic with HTTPS. You enable SSL
            encryption in the <filename>metadata_agent.ini</filename> file.</para>
        <procedure>
            <title>To enable SSL encryption</title>
            <step>
                <para>Enable the HTTPS protocol:</para>
                <programlisting>nova_metadata_protocol = https</programlisting>
            </step>
            <step>
                <para>Determine whether insecure SSL connections are accepted for Compute metadata server
                    requests. The default value is <option>False</option>:</para>
                <programlisting>nova_metadata_insecure = False</programlisting>
            </step>
            <step>
                <para>Specify the path to the client certificate:</para>
                <programlisting>nova_client_cert = <replaceable>PATH_TO_CERT</replaceable></programlisting>
            </step>
            <step>
                <para>Specify the path to the private key:</para>
                <programlisting>nova_client_priv_key = <replaceable>PATH_TO_KEY</replaceable></programlisting>
            </step>
        </procedure>
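        <para>Taken together, the resulting <filename>metadata_agent.ini</filename>
            settings might look like this; the certificate and key paths shown are
            examples only:</para>
        <programlisting language="ini">nova_metadata_protocol = https
nova_metadata_insecure = False
nova_client_cert = /etc/neutron/ssl/metadata-client.crt
nova_client_priv_key = /etc/neutron/ssl/metadata-client.key</programlisting>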
    </section>
</section>

<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_compute-system-admin">
    <title>System administration</title>
    <para>By understanding how the different installed nodes interact with each other, you can
        administer the Compute installation. Compute offers many ways to install using multiple
        servers, but the general idea is that you can have multiple compute nodes that control the
        virtual servers and a cloud controller node that contains the remaining Compute
        services.</para>
    <para>The Compute cloud works through the interaction of a series of daemon processes named
        <systemitem>nova-*</systemitem> that reside persistently on the host machine or
        machines. These binaries can all run on the same machine or be spread out on multiple boxes
        in a large deployment. The responsibilities of services and drivers are:</para>
@@ -22,7 +17,7 @@
        <para>Services:</para>
        <itemizedlist>
            <listitem>
                <para><systemitem class="service">nova-api</systemitem>. Receives XML
                    requests and sends them to the rest of the system. It is a WSGI app that
                    routes and authenticates requests. It supports the EC2 and OpenStack
                    APIs. There is a <filename>nova-api.conf</filename> file created when
@@ -65,8 +60,8 @@
                    other is specified).</para>
            </listitem>
            <listitem>
                <para><systemitem>nova-scheduler</systemitem>. Dispatches requests for new
                    virtual machines to the correct node.</para>
            </listitem>
            <listitem>
                <para><systemitem>nova-novncproxy</systemitem>. Provides a VNC proxy for
@@ -81,44 +76,38 @@
                    talk. <systemitem>nova-network</systemitem> and
                    <systemitem>nova-scheduler</systemitem> also have drivers.</para>
            </listitem>
        </itemizedlist>
    </para>
    <section xml:id="section_manage-compute-users">
        <title>Manage Compute users</title>
        <para>Access to the Euca2ools (ec2) API is controlled by an access key and a secret key. The
            user’s access key needs to be included in the request, and the request must be signed
            with the secret key. Upon receipt of API requests, Compute verifies the signature and
            runs commands on behalf of the user.</para>
        <para>To begin using Compute, you must create a user with the Identity Service.</para>
    </section>
    <xi:include href="../../common/section_cli_nova_volumes.xml"/>
    <xi:include href="../../common/section_cli_nova_customize_flavors.xml"/>
    <xi:include href="../../common/section_compute_config-firewalls.xml"/>
    <section xml:id="admin-password-injection">
        <?dbhtml stop-chunking?>
        <title>Inject administrator password</title>
        <para>You can configure Compute to generate a random administrator (root) password and
            inject that password into the instance. If this feature is enabled, a user can
            <command>ssh</command> to an instance without an <command>ssh</command> keypair. The
            random password appears in the output of the <command>nova boot</command> command. You
            can also view and set the <literal>admin</literal> password from the dashboard.</para>
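        <para>For example (the image and flavor names here are illustrative), boot a
            server and look for the <literal>adminPass</literal> property in the
            command output:</para>
        <screen><prompt>$</prompt> <userinput>nova boot --image cirros --flavor m1.tiny test-server</userinput></screen>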
        <simplesect>
            <title>Dashboard</title>
            <para>The dashboard is configured by default to display the <literal>admin</literal>
                password and allow the user to modify it.</para>
            <para>If you do not want to support password injection, we recommend disabling the
                password fields by editing your Dashboard <filename>local_settings</filename> file
                (the file location varies by Linux distribution; on Fedora/RHEL/CentOS:
                <filename>/etc/openstack-dashboard/local_settings</filename>, on Ubuntu and Debian:
                <filename>/etc/openstack-dashboard/local_settings.py</filename>, and on openSUSE
                and SUSE Linux Enterprise Server:
                <filename>/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py</filename>)
                <programlisting language="ini">OPENSTACK_HYPERVISOR_FEATURE = {
...
    'can_set_password': False,
@@ -128,27 +117,24 @@
            <title>Libvirt-based hypervisors (KVM, QEMU, LXC)</title>
            <para>For hypervisors such as KVM that use the libvirt backend, <literal>admin</literal>
                password injection is disabled by default. To enable it, set the following option in
                <filename>/etc/nova/nova.conf</filename>:</para>
            <para>
                <programlisting language="ini">[libvirt]
inject_password=true</programlisting>
            </para>
            <para>When enabled, Compute will modify the password of the root account by editing the
                <filename>/etc/shadow</filename> file inside the virtual machine
                instance.</para>
            <note>
                <para>Users can only ssh to the instance by using the admin password if:</para>
                <itemizedlist>
                    <listitem>
                        <para>The virtual machine image is a Linux distribution.</para>
                    </listitem>
                    <listitem>
                        <para>The virtual machine has been configured to allow users to
                            <command>ssh</command> as the root user. This is not the case for
                            <link xlink:href="http://cloud-images.ubuntu.com/">Ubuntu cloud
                            images</link>, which disallow <command>ssh</command> to the root
                            account by default.</para>
                    </listitem>
@@ -167,103 +153,102 @@
                must configure the Windows image to retrieve the <literal>admin</literal> password
                on boot by installing an agent such as <link
                xlink:href="https://github.com/cloudbase/cloudbase-init"
                >cloudbase-init</link>.</para>
        </simplesect>
    </section>
    <section xml:id="section_manage-the-cloud">
        <title>Manage the cloud</title>
        <para>A system administrator can use the <command>nova</command> client and the
            <command>Euca2ools</command> commands to manage the cloud.</para>
        <para>Both nova client and euca2ools can be used by all users, though specific commands
            might be restricted by Role Based Access Control in the Identity Service.</para>
        <procedure>
            <title>To use the nova client</title>
            <step>
                <para>Installing the <package>python-novaclient</package> package gives you a
                    <code>nova</code> shell command that enables Compute API interactions from
                    the command line. Install the client, provide your user name and
                    password (typically set as environment variables for convenience), and you
                    can then send commands to your cloud on the command line.</para>
                <para>To install <package>python-novaclient</package>, download the tarball from
                    <link
                    xlink:href="http://pypi.python.org/pypi/python-novaclient/2.6.3#downloads"
                    >http://pypi.python.org/pypi/python-novaclient/2.6.3#downloads</link> and
                    then install it in your favorite Python environment.</para>
                <screen><prompt>$</prompt> <userinput>curl -O http://pypi.python.org/packages/source/p/python-novaclient/python-novaclient-2.6.3.tar.gz</userinput>
<prompt>$</prompt> <userinput>tar -zxvf python-novaclient-2.6.3.tar.gz</userinput>
<prompt>$</prompt> <userinput>cd python-novaclient-2.6.3</userinput></screen>
                <para>As <systemitem class="username">root</systemitem>, run:</para>
                <screen><prompt>#</prompt> <userinput>python setup.py install</userinput></screen>
            </step>
            <step>
                <para>Confirm the installation:</para>
                <screen><prompt>$</prompt> <userinput>nova help</userinput>
<computeroutput>usage: nova [--version] [--debug] [--os-cache] [--timings]
            [--timeout <replaceable>SECONDS</replaceable>] [--os-username <replaceable>AUTH_USER_NAME</replaceable>]
            [--os-password <replaceable>AUTH_PASSWORD</replaceable>]
            [--os-tenant-name <replaceable>AUTH_TENANT_NAME</replaceable>]
            [--os-tenant-id <replaceable>AUTH_TENANT_ID</replaceable>] [--os-auth-url <replaceable>AUTH_URL</replaceable>]
            [--os-region-name <replaceable>REGION_NAME</replaceable>] [--os-auth-system <replaceable>AUTH_SYSTEM</replaceable>]
            [--service-type <replaceable>SERVICE_TYPE</replaceable>] [--service-name <replaceable>SERVICE_NAME</replaceable>]
            [--volume-service-name <replaceable>VOLUME_SERVICE_NAME</replaceable>]
            [--endpoint-type <replaceable>ENDPOINT_TYPE</replaceable>]
            [--os-compute-api-version <replaceable>COMPUTE_API_VERSION</replaceable>]
            [--os-cacert <replaceable>CA_CERTIFICATE</replaceable>] [--insecure]
            [--bypass-url <replaceable>BYPASS_URL</replaceable>]
            <replaceable>SUBCOMMAND</replaceable> ...</computeroutput></screen>
                <note>
                    <para>This command returns a list of <command>nova</command> commands and
                        parameters. To obtain help for a subcommand, run:</para>
                    <screen><prompt>$</prompt> <userinput>nova help <replaceable>SUBCOMMAND</replaceable></userinput></screen>
                    <para>You can also refer to the <link
                        xlink:href="http://docs.openstack.org/cli-reference/content/">
                        <citetitle>OpenStack Command-Line Reference</citetitle></link> for a
                        complete listing of <command>nova</command> commands and parameters.</para>
                </note>
            </step>
            <step>
                <para>Set the required parameters as environment variables to make running commands
                    easier. For example, you can add <parameter>--os-username</parameter> as a
                    <command>nova</command> option, or set it as an environment variable. To set
                    the user name, password, and tenant as environment variables, use:</para>
                <screen><prompt>$</prompt> <userinput>export OS_USERNAME=joecool</userinput>
<prompt>$</prompt> <userinput>export OS_PASSWORD=coolword</userinput>
<prompt>$</prompt> <userinput>export OS_TENANT_NAME=coolu</userinput></screen>
            </step>
            <step>
                <para>Using the Identity Service, you are supplied with an authentication endpoint,
                    which Compute recognizes as the <literal>OS_AUTH_URL</literal>:</para>
                <screen><prompt>$</prompt> <userinput>export OS_AUTH_URL=http://hostname:5000/v2.0</userinput>
<prompt>$</prompt> <userinput>export NOVA_VERSION=1.1</userinput></screen>
            </step>
        </procedure>
        <section xml:id="section_euca2ools">
            <title>Use the euca2ools commands</title>
            <para>For a command-line interface to EC2 API calls, use the
                <command>euca2ools</command> command-line tool. See <link
                xlink:href="http://open.eucalyptus.com/wiki/Euca2oolsGuide_v1.3"
                >http://open.eucalyptus.com/wiki/Euca2oolsGuide_v1.3</link>.</para>
        </section>
        <xi:include href="../../common/section_cli_nova_usage_statistics.xml"/>
    </section>
    <section xml:id="section_manage-logs">
        <title>Manage logs</title>
        <simplesect>
            <title>Logging module</title>
            <para>To specify a configuration file that changes the logging behavior (for
                example, to set the logging level to <literal>DEBUG</literal>,
                <literal>INFO</literal>, <literal>WARNING</literal>, or
                <literal>ERROR</literal>), add this line to the
                <filename>/etc/nova/nova.conf</filename> file:
                <programlisting language="ini">log-config=/etc/nova/logging.conf</programlisting></para>
            <para>The logging configuration file is an ini-style configuration file, which must
                contain a section called <literal>logger_nova</literal>, which controls the behavior
                of the logging facility in the <literal>nova-*</literal> services. For
                example:<programlisting language="ini">[logger_nova]
level = INFO
handlers = stderr
qualname = nova</programlisting></para>
            <para>This example sets the debugging level to <literal>INFO</literal> (which is less
                verbose than the default <literal>DEBUG</literal> setting). <itemizedlist>
                    <listitem>
                        <para>For more details on the logging configuration syntax, including the
@@ -275,45 +260,46 @@
                    </listitem>
                    <listitem>
                        <para>For an example <filename>logging.conf</filename> file with various
                            defined handlers, see the <link
                            xlink:href="http://docs.openstack.org/trunk/config-reference/content/">
                            <citetitle>OpenStack Configuration
                            Reference</citetitle></link>.</para>
                    </listitem>
                </itemizedlist>
            </para>
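            <para>Note that a standalone <filename>logging.conf</filename> must also declare
                the loggers, handlers, and formatters it references. A minimal working
                sketch follows; the handler and formatter names are illustrative:</para>
            <programlisting language="ini">[loggers]
keys = root, nova

[handlers]
keys = stderr

[formatters]
keys = default

[logger_root]
level = WARNING
handlers = stderr

[logger_nova]
level = INFO
handlers = stderr
qualname = nova

[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = default

[formatter_default]
format = %(asctime)s %(levelname)s %(name)s %(message)s</programlisting>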
        </simplesect>
        <simplesect>
            <title>Syslog</title>
            <para>You can configure OpenStack Compute services to send logging information to
                <systemitem>syslog</systemitem>. This is useful if you want to use
                <systemitem>rsyslog</systemitem>, which forwards the logs to a remote machine.
                You need to separately configure the Compute service (nova), the Identity service
                (keystone), the Image Service (glance), and, if you are using it, the Block Storage
                service (cinder) to send log messages to <systemitem>syslog</systemitem>. To do so,
                add the following lines to:</para>
            <itemizedlist>
                <listitem>
                    <para><filename>/etc/nova/nova.conf</filename></para>
                </listitem>
                <listitem>
                    <para><filename>/etc/keystone/keystone.conf</filename></para>
                </listitem>
                <listitem>
                    <para><filename>/etc/glance/glance-api.conf</filename></para>
                </listitem>
                <listitem>
                    <para><filename>/etc/glance/glance-registry.conf</filename></para>
                </listitem>
                <listitem>
                    <para><filename>/etc/cinder/cinder.conf</filename></para>
                </listitem>
            </itemizedlist>
            <programlisting language="ini">verbose = False
debug = False
use_syslog = True
syslog_log_facility = LOG_LOCAL0</programlisting>
            <para>In addition to enabling <systemitem>syslog</systemitem>, these settings also turn
                off more verbose output and debugging output from the log.<note>
                    <para>Although the example above uses the same local facility for each service
                        (<literal>LOG_LOCAL0</literal>, which corresponds to
                        <systemitem>syslog</systemitem> facility <literal>LOCAL0</literal>), we
@@ -325,62 +311,63 @@
                        For more details, see the <systemitem>syslog</systemitem>
                        documentation.</para>
                </note></para>
        </simplesect>
        <simplesect>
            <title>Rsyslog</title>
            <para><systemitem>rsyslog</systemitem> is a useful tool for setting up a centralized log
                server across multiple machines. We briefly describe the configuration to set up an
                <systemitem>rsyslog</systemitem> server; a full treatment of
                <systemitem>rsyslog</systemitem> is beyond the scope of this document. We assume
                <systemitem>rsyslog</systemitem> has already been installed on your hosts
                (default for most Linux distributions).</para>
            <para>This example provides a minimal configuration for
                <filename>/etc/rsyslog.conf</filename> on the log server host, which receives
                the log files:</para>
            <programlisting language="bash"># provides TCP syslog reception
$ModLoad imtcp
$InputTCPServerRun 1024</programlisting>
            <para>Add a filter rule to <filename>/etc/rsyslog.conf</filename> which looks for a host
                name. The example below uses <replaceable>COMPUTE_01</replaceable> as an example of
                a compute host name:</para>
            <programlisting language="bash">:hostname, isequal, "<replaceable>COMPUTE_01</replaceable>" /mnt/rsyslog/logs/compute-01.log</programlisting>
            <para>On each compute host, create a file named
                <filename>/etc/rsyslog.d/60-nova.conf</filename>, with the following
                content:</para>
            <programlisting language="bash"># prevent debug from dnsmasq with the daemon.none parameter
*.*;auth,authpriv.none,daemon.none,local0.none -/var/log/syslog
# Specify a log level of ERROR
local0.error @@172.20.1.43:1024</programlisting>
            <para>Once you have created this file, restart your <systemitem>rsyslog</systemitem>
                daemon. Error-level log messages on the compute hosts should now be sent to your log
                server.</para>
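            <para>For example, on distributions that manage services with the System V
                <command>service</command> wrapper:</para>
            <screen><prompt>#</prompt> <userinput>service rsyslog restart</userinput></screen>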
        </simplesect>
    </section>
    <xi:include href="section_compute-rootwrap.xml"/>
    <xi:include href="section_compute-configure-migrations.xml"/>
    <section xml:id="section_live-migration-usage">
        <title>Migrate instances</title>
        <para>Before starting migrations, review the <link
            linkend="section_configuring-compute-migrations">Configure migrations
            section</link>.</para>
        <para>Migration provides a scheme to migrate running instances from one OpenStack Compute
            server to another OpenStack Compute server.</para>
        <procedure>
            <title>To migrate instances</title>
            <step>
                <para>Look at the running instances to get the ID of the instance you wish to
                    migrate:</para>
                <screen><prompt>$</prompt> <userinput>nova list</userinput>
<computeroutput><![CDATA[+--------------------------------------+------+--------+-----------------+
| ID                                   | Name | Status |Networks         |
+--------------------------------------+------+--------+-----------------+
| d1df1b5a-70c4-4fed-98b7-423362f2c47c | vm1  | ACTIVE | private=a.b.c.d |
| d693db9e-a7cf-45ef-a7c9-b3ecb5f22645 | vm2  | ACTIVE | private=e.f.g.h |
+--------------------------------------+------+--------+-----------------+]]></computeroutput></screen>
            </step>
            <step>
                <para>Look at information associated with that instance. This example uses 'vm1'
                    from above.</para>
                <screen><prompt>$</prompt> <userinput>nova show d1df1b5a-70c4-4fed-98b7-423362f2c47c</userinput>
<computeroutput><![CDATA[+-------------------------------------+----------------------------------------------------------+
| Property                            | Value                                                    |
+-------------------------------------+----------------------------------------------------------+
@@ -394,11 +381,11 @@
| status                              | ACTIVE                                                   |
...
+-------------------------------------+----------------------------------------------------------+]]></computeroutput></screen>
                <para>In this example, vm1 is running on HostB.</para>
            </step>
            <step>
                <para>Select the server to which instances will be migrated:</para>
                <screen><prompt>#</prompt> <userinput>nova service-list</userinput>
<computeroutput>+------------------+------------+----------+---------+-------+----------------------------+-----------------+
| Binary           | Host       | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+------------------+------------+----------+---------+-------+----------------------------+-----------------+
@@ -409,13 +396,11 @@
| nova-compute     | HostC      | nova     | enabled | up    | 2014-03-25T10:33:31.000000 | -               |
| nova-cert        | HostA      | internal | enabled | up    | 2014-03-25T10:33:31.000000 | -               |
+------------------+------------+----------+---------+-------+----------------------------+-----------------+</computeroutput></screen>
                <para>In this example, HostC can be picked up because <systemitem class="service"
                    >nova-compute</systemitem> is running on it.</para>
            </step>
            <step>
                <para>Ensure that HostC has enough resources for migration:</para>
                <screen><prompt>#</prompt> <userinput>nova host-describe HostC</userinput>
<computeroutput>+-----------+------------+-----+-----------+---------+
| HOST      | PROJECT    | cpu | memory_mb | disk_gb |
@@ -426,65 +411,61 @@
| HostC     | p1         | 13  | 21284     | 442     |
| HostC     | p2         | 13  | 21284     | 442     |
+-----------+------------+-----+-----------+---------+</computeroutput></screen>
                <itemizedlist>
                    <listitem>
                        <para><emphasis role="bold">cpu:</emphasis> the number of CPUs</para>
                    </listitem>
                    <listitem>
                        <para><emphasis role="bold">memory_mb:</emphasis> total amount of memory (in
                            MB)</para>
                    </listitem>
                    <listitem>
                        <para><emphasis role="bold">disk_gb:</emphasis> total amount of space for
                            NOVA-INST-DIR/instances (in GB)</para>
                    </listitem>
                    <listitem>
                        <para><emphasis role="bold">1st line shows </emphasis>the total amount of
                            resources for the physical server.</para>
                    </listitem>
                    <listitem>
                        <para><emphasis role="bold">2nd line shows </emphasis>currently used
                            resources.</para>
                    </listitem>
                    <listitem>
                        <para><emphasis role="bold">3rd line shows </emphasis>maximum used
                            resources.</para>
                    </listitem>
                    <listitem>
                        <para><emphasis role="bold">4th line and under</emphasis> shows the resources
                            used by each project.</para>
                    </listitem>
                </itemizedlist>
            </step>
            <step>
                <para>Use the <command>nova live-migration</command> command to migrate the
                    instances:<screen><prompt>$</prompt> <userinput>nova live-migration <replaceable>SERVER</replaceable> <replaceable>HOST_NAME</replaceable></userinput></screen></para>
                <para>Where <replaceable>SERVER</replaceable> can be the ID or name of the server.
                    For example:</para>
                <screen><prompt>$</prompt> <userinput>nova live-migration d1df1b5a-70c4-4fed-98b7-423362f2c47c HostC</userinput><computeroutput>
<![CDATA[Migration of d1df1b5a-70c4-4fed-98b7-423362f2c47c initiated.]]></computeroutput></screen>
                <para>Ensure instances are migrated successfully with <command>nova list</command>.
                    If instances are still running on HostB, check log files (src/dest <systemitem
                    class="service">nova-compute</systemitem> and <systemitem class="service"
                    >nova-scheduler</systemitem>) to determine why.</para>
                <note>
                    <para>Although the <command>nova</command> command is called
                        <command>live-migration</command>, under the default Compute
                        configuration options the instances are suspended before migration.</para>
                    <para>For more details, see <link
                        xlink:href="http://docs.openstack.org/trunk/config-reference/content/configuring-openstack-compute-basics.html"
                        >Configure migrations</link> in <citetitle>OpenStack Configuration
                        Reference</citetitle>.</para>
                </note>
            </step>
        </procedure>
    </section>
    <xi:include href="../../common/section_compute-configure-console.xml"/>
    <xi:include href="section_compute-configure-service-groups.xml"/>
    <xi:include href="section_compute-security.xml"/>
    <xi:include href="section_compute-recover-nodes.xml"/>
</section>
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_networking-use">
    <title>Use Networking</title>
    <para>You can manage OpenStack Networking services by using the <systemitem>service</systemitem>
        command. For example:</para>
    <screen><prompt>#</prompt> <userinput>service neutron-server stop</userinput>
<prompt>#</prompt> <userinput>service neutron-server status</userinput>
<prompt>#</prompt> <userinput>service neutron-server start</userinput>
<prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
    <para>Log files are in the <systemitem>/var/log/neutron</systemitem> directory.</para>
    <para>Configuration files are in the <systemitem>/etc/neutron</systemitem> directory.</para>
    <para>Cloud administrators and tenants can use OpenStack Networking to build rich network
        topologies. Cloud administrators can create network connectivity on behalf of
        tenants.</para>
    <!-- removed this line because there are no 'following procedures' -->
    <!--<para>A tenant or cloud administrator can both perform the
        following procedures.</para>-->
    <section xml:id="api_features">
        <title>Core Networking API features</title>
        <para>After you install and configure Networking, tenants and administrators can perform
            create-read-update-delete (CRUD) API networking operations by using the Networking API
            directly or the neutron command-line interface (CLI). The neutron CLI is a wrapper around
            the Networking API. Every Networking API call has a corresponding neutron
            command.</para>
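        <para>For example, these two calls create a network and attach a subnet to it;
            the network name and CIDR shown are illustrative:</para>
        <screen><prompt>$</prompt> <userinput>neutron net-create net1</userinput>
<prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24 --name subnet1</userinput></screen>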
        <para>The CLI includes a number of options. For details, see the <link
            xlink:href="http://docs.openstack.org/user-guide/content/"><citetitle>OpenStack End
            User Guide</citetitle></link>.</para>
        <section xml:id="api_abstractions">
            <title>API abstractions</title>
            <para>The Networking v2.0 API provides control over both L2 network topologies and their
                allocated IP addresses (IP Address Management or IPAM). There is also an extension
                to cover basic L3 forwarding and NAT, which provides capabilities similar to
                <command>nova-network</command>.</para>
            <para><table rules="all">
                <caption>API abstractions</caption>
@@ -58,48 +44,34 @@
                </thead>
                <tbody>
                    <tr>
                        <td><emphasis role="bold">Network</emphasis></td>
                        <td>An isolated L2 network segment (similar to a VLAN) that forms the
                            basis for describing the L2 network topology available in a
                            Networking deployment.</td>
                    </tr>
                    <tr>
                        <td><emphasis role="bold">Subnet</emphasis></td>
                        <td>Associates a block of IP addresses and other network configuration,
                            such as default gateways or DNS servers, with a Networking network.
                            Each subnet represents an IPv4 or IPv6 address block, and each
                            Networking network can have multiple subnets.</td>
                    </tr>
                    <tr>
                        <td><emphasis role="bold">Port</emphasis></td>
                        <td>Represents an attachment port to an L2 Networking network. When a
                            port is created on the network, by default it is allocated an
                            available fixed IP address out of one of the designated subnets for
                            each IP version (if one exists). When the port is destroyed, its
                            allocated addresses return to the pool of available IPs on the
                            subnet. Users of the Networking API can either choose a specific IP
                            address from the block, or let Networking choose the first available
                            IP address.</td>
                    </tr>
                </tbody>
            </table></para>
            <?hard-pagebreak?>
            <para>This table summarizes the attributes available for each networking abstraction.
                For information about API abstraction and operations, see the <link
                xlink:href="http://docs.openstack.org/api/openstack-network/2.0/content/"
                >Networking API v2.0 Reference</link>.</para>
            <table rules="all">
@@ -121,9 +93,8 @@
                        <td><option>admin_state_up</option></td>
                        <td>bool</td>
                        <td>True</td>
                        <td>Administrative state of the network. If specified as False (down), this
                            network does not forward packets.</td>
                    </tr>
                    <tr>
                        <td><option>id</option></td>
@@ -135,41 +106,35 @@
                        <td><option>name</option></td>
                        <td>string</td>
                        <td>None</td>
                        <td>Human-readable name for this network; it is not required to be unique.</td>
                    </tr>
                    <tr>
                        <td><option>shared</option></td>
                        <td>bool</td>
                        <td>False</td>
                        <td>Specifies whether this network resource can be accessed by any tenant.
                            The default policy setting restricts usage of this attribute to
                            administrative users only.</td>
                    </tr>
                    <tr>
                        <td><option>status</option></td>
                        <td>string</td>
                        <td>N/A</td>
                        <td>Indicates whether this network is currently operational.</td>
                    </tr>
                    <tr>
                        <td><option>subnets</option></td>
                        <td>list(uuid-str)</td>
                        <td>Empty list</td>
                        <td>List of subnets associated with this network.</td>
                    </tr>
                    <tr>
                        <td><option>tenant_id</option></td>
                        <td>uuid-str</td>
                        <td>N/A</td>
                        <td>Tenant owner of the network. Only administrative users can set the
                            tenant identifier; this cannot be changed using authorization
                            policies.</td>
                    </tr>
                </tbody>
            </table>
@@ -191,12 +156,10 @@
                    <tr>
                        <td><option>allocation_pools</option></td>
                        <td>list(dict)</td>
                        <td>Every address in <option>cidr</option>, excluding
                            <option>gateway_ip</option> (if configured).</td>
                        <td><para>List of cidr sub-ranges that are available for dynamic allocation
                            to ports. Syntax:</para>
                            <programlisting language="json">[ { "start":"10.0.0.2",
    "end": "10.0.0.254"} ]</programlisting></td>
                    </tr>
@@ -204,37 +167,32 @@
                        <td><option>cidr</option></td>
                        <td>string</td>
                        <td>N/A</td>
                        <td>IP range for this subnet, based on the IP version.</td>
                    </tr>
                    <tr>
                        <td><option>dns_nameservers</option></td>
                        <td>list(string)</td>
                        <td>Empty list</td>
                        <td>List of DNS name servers used by hosts in this subnet.</td>
                    </tr>
                    <tr>
                        <td><option>enable_dhcp</option></td>
                        <td>bool</td>
                        <td>True</td>
                        <td>Specifies whether DHCP is enabled for this subnet.</td>
                    </tr>
                    <tr>
                        <td><option>gateway_ip</option></td>
                        <td>string</td>
                        <td>First address in <option>cidr</option></td>
                        <td>Default gateway used by devices in this subnet.</td>
                    </tr>
                    <tr>
                        <td><option>host_routes</option></td>
                        <td>list(dict)</td>
                        <td>Empty list</td>
                        <td>Routes that should be used by devices with IPs from this subnet (not
                            including local subnet route).</td>
                    </tr>
                    <tr>
                        <td><option>id</option></td>
@@ -252,24 +210,20 @@
                        <td><option>name</option></td>
                        <td>string</td>
                        <td>None</td>
                        <td>Human-readable name for this subnet (might not be unique).</td>
                    </tr>
                    <tr>
                        <td><option>network_id</option></td>
                        <td>uuid-string</td>
                        <td>N/A</td>
                        <td>Network with which this subnet is associated.</td>
                    </tr>
                    <tr>
                        <td><option>tenant_id</option></td>
                        <td>uuid-string</td>
                        <td>N/A</td>
                        <td>Owner of network. Only administrative users can set the tenant
                            identifier; this cannot be changed using authorization policies.</td>
|
||||
cannot be changed using authorization
|
||||
policies.</td>
|
||||
<td>Owner of network. Only administrative users can set the tenant
|
||||
identifier; this cannot be changed using authorization policies.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
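<para>For example, to restrict dynamic allocation to part of the block, you can pass an
explicit allocation pool when you create the subnet. This is a sketch; the network
name and address range are hypothetical:</para>
<screen><prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24 --allocation-pool start=10.0.0.100,end=10.0.0.200</userinput></screen>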
@@ -293,31 +247,28 @@
<td><option>admin_state_up</option></td>
<td>bool</td>
<td>true</td>
<td>Administrative state of this port. If
specified as False (down), this port does
not forward packets.</td>
<td>Administrative state of this port. If specified as False (down), this
port does not forward packets.</td>
</tr>
<tr>
<td><option>device_id</option></td>
<td>string</td>
<td>None</td>
<td>Identifies the device using this port (for
example, a virtual server's ID).</td>
<td>Identifies the device using this port (for example, a virtual server's
ID).</td>
</tr>
<tr>
<td><option>device_owner</option></td>
<td>string</td>
<td>None</td>
<td>Identifies the entity using this port (for
example, a DHCP agent).</td>
<td>Identifies the entity using this port (for example, a DHCP agent).</td>
</tr>
<tr>
<td><option>fixed_ips</option></td>
<td>list(dict)</td>
<td>Automatically allocated from pool</td>
<td>Specifies IP addresses for this port;
associates the port with the subnets
containing the listed IP addresses.</td>
<td>Specifies IP addresses for this port; associates the port with the
subnets containing the listed IP addresses.</td>
</tr>
<tr>
<td><option>id</option></td>
@@ -335,45 +286,38 @@
<td><option>name</option></td>
<td>string</td>
<td>None</td>
<td>Human-readable name for this port (might
not be unique).</td>
<td>Human-readable name for this port (might not be unique).</td>
</tr>
<tr>
<td><option>network_id</option></td>
<td>uuid-string</td>
<td>N/A</td>
<td>Network with which this port is
associated.</td>
<td>Network with which this port is associated.</td>
</tr>
<tr>
<td><option>status</option></td>
<td>string</td>
<td>N/A</td>
<td>Indicates whether this port is currently
operational.</td>
<td>Indicates whether this port is currently operational.</td>
</tr>
<tr>
<td><option>tenant_id</option></td>
<td>uuid-string</td>
<td>N/A</td>
<td>Owner of the network. Only administrative
users can set the tenant identifier; this
cannot be changed using authorization
policies.</td>
<td>Owner of the network. Only administrative users can set the tenant
identifier; this cannot be changed using authorization policies.</td>
</tr>
</tbody>
</table>
</section>
<section xml:id="basic_operations">
<title>Basic Networking operations</title>
<para>To learn about advanced capabilities available
through the neutron command-line interface (CLI), read
the networking section in the <link
xlink:href="http://docs.openstack.org/user-guide/content/index.html"
> OpenStack End User Guide</link>.</para>
<para>This table shows example neutron commands that
enable you to complete basic network
operations:</para>
<para>To learn about advanced capabilities available through the neutron command-line
interface (CLI), read the networking section in the <link
xlink:href="http://docs.openstack.org/user-guide/content/index.html"> OpenStack
End User Guide</link>.</para>
<para>This table shows example neutron commands that enable you to complete basic
network operations:</para>
<table rules="all">
<caption>Basic Networking operations</caption>
<col width="40%"/>
@@ -390,8 +334,7 @@
<td><screen><prompt>$</prompt> <userinput>neutron net-create net1</userinput></screen></td>
</tr>
<tr>
<td>Creates a subnet that is associated with
net1.</td>
<td>Creates a subnet that is associated with net1.</td>
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24</userinput></screen></td>
</tr>
<tr>
@@ -399,60 +342,49 @@
<td><screen><prompt>$</prompt> <userinput>neutron port-list</userinput></screen></td>
</tr>
<tr>
<td>Lists ports for a specified tenant and
displays the <option>id</option>,
<option>fixed_ips</option>, and
<option>device_owner</option>
<td>Lists ports for a specified tenant and displays the <option>id</option>,
<option>fixed_ips</option>, and <option>device_owner</option>
columns.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list -c id -c fixed_ips -c device_owner</userinput></screen></td>
</tr>
<tr>
<td>Shows information for a specified
port.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-show <replaceable>port-id</replaceable></userinput></screen></td>
<td>Shows information for a specified port.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-show <replaceable>PORT_ID</replaceable></userinput></screen></td>
</tr>
</tbody>
</table>
<note>
<para>The <option>device_owner</option> field
describes who owns the port. A port whose
<option>device_owner</option> begins
with:</para>
<para>The <option>device_owner</option> field describes who owns the port. A port
whose <option>device_owner</option> begins with:</para>
<itemizedlist>
<listitem>
<para><literal>network</literal> is created by
Networking.</para>
<para><literal>network</literal> is created by Networking.</para>
</listitem>
<listitem>
<para><literal>compute</literal> is created by
Compute.</para>
<para><literal>compute</literal> is created by Compute.</para>
</listitem>
</itemizedlist>
</note>
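<para>For example, to list only the ports that Networking itself created for DHCP, you
can filter on <option>device_owner</option>; the filter value shown here is an
assumption based on the naming convention above:</para>
<screen><prompt>$</prompt> <userinput>neutron port-list -- --device_owner=network:dhcp</userinput></screen>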
</section>
<section xml:id="admin_api_config">
<title>Administrative operations</title>
<para>The cloud administrator can run any
<command>neutron</command> command on behalf of
tenants by specifying an Identity
<option>tenant_id</option> in the command, as
<para>The cloud administrator can run any <command>neutron</command> command on behalf
of tenants by specifying an Identity <option>tenant_id</option> in the command, as
follows:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create --tenant-id=<replaceable>tenant-id</replaceable> <replaceable>network-name</replaceable></userinput></screen>
<screen><prompt>$</prompt> <userinput>neutron net-create --tenant-id=<replaceable>TENANT_ID</replaceable> <replaceable>NETWORK_NAME</replaceable></userinput></screen>
<para>For example:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create --tenant-id=5e4bbe24b67a4410bc4d9fae29ec394e net1</userinput></screen>
<note>
<para>To view all tenant IDs in Identity, run the
following command as an Identity Service admin
user:</para>
<para>To view all tenant IDs in Identity, run the following command as an Identity
Service admin user:</para>
<screen><prompt>$</prompt> <userinput>keystone tenant-list</userinput></screen>
</note>
</section>
<?hard-pagebreak?>
<section xml:id="advanced_networking">
<title>Advanced Networking operations</title>
<para>This table shows example Networking commands that
enable you to complete advanced network
operations:</para>
<para>This table shows example Networking commands that enable you to complete advanced
network operations:</para>
<table rules="all">
<caption>Advanced Networking operations</caption>
<col width="40%"/>
@@ -465,18 +397,15 @@
</thead>
<tbody>
<tr>
<td>Creates a network that all tenants can
use.</td>
<td>Creates a network that all tenants can use.</td>
<td><screen><prompt>$</prompt> <userinput>neutron net-create --shared public-net</userinput></screen></td>
</tr>
<tr>
<td>Creates a subnet with a specified gateway
IP address.</td>
<td>Creates a subnet with a specified gateway IP address.</td>
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create --gateway 10.0.0.254 net1 10.0.0.0/24</userinput></screen></td>
</tr>
<tr>
<td>Creates a subnet that has no gateway IP
address.</td>
<td>Creates a subnet that has no gateway IP address.</td>
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create --no-gateway net1 10.0.0.0/24</userinput></screen></td>
</tr>
<tr>
@@ -484,19 +413,16 @@
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24 --enable_dhcp False</userinput></screen></td>
</tr>
<tr>
<td>Creates a subnet with a specified set of
host routes.</td>
<td>Creates a subnet with a specified set of host routes.</td>
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create test-net1 40.0.0.0/24 --host_routes type=dict list=true destination=40.0.1.0/24,nexthop=40.0.0.2</userinput></screen></td>
</tr>
<tr>
<td>Creates a subnet with a specified set of
DNS name servers.</td>
<td>Creates a subnet with a specified set of DNS name servers.</td>
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create test-net1 40.0.0.0/24 --dns_nameservers list=true 8.8.8.7 8.8.8.8</userinput></screen></td>
</tr>
<tr>
<td>Displays all ports and IPs allocated on a
network.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list --network_id <replaceable>net-id</replaceable></userinput></screen></td>
<td>Displays all ports and IPs allocated on a network.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list --network_id <replaceable>NET_ID</replaceable></userinput></screen></td>
</tr>
</tbody>
</table>
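<para>These options can be combined in a single call. A sketch, assuming a network named
net1; the gateway and name server addresses are hypothetical:</para>
<screen><prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24 --gateway 10.0.0.254 --dns_nameservers list=true 8.8.8.8</userinput></screen>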
@@ -507,12 +433,10 @@
<title>Use Compute with Networking</title>
<section xml:id="basic_workflow_with_nova">
<title>Basic Compute and Networking operations</title>
<para>This table shows example neutron and nova commands
that enable you to complete basic VM networking
operations:</para>
<para>This table shows example neutron and nova commands that enable you to complete
basic VM networking operations:</para>
<table rules="all">
<caption>Basic Compute and Networking
operations</caption>
<caption>Basic Compute and Networking operations</caption>
<col width="40%"/>
<col width="60%"/>
<thead>
@@ -527,52 +451,41 @@
<td><screen><prompt>$</prompt> <userinput>neutron net-list</userinput></screen></td>
</tr>
<tr>
<td>Boots a VM with a single NIC on a selected
Networking network.</td>
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>img</replaceable> --flavor <replaceable>flavor</replaceable> --nic net-id=<replaceable>net-id</replaceable> <replaceable>vm-name</replaceable></userinput></screen></td>
<td>Boots a VM with a single NIC on a selected Networking network.</td>
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> --nic net-id=<replaceable>NET_ID</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
</tr>
<tr>
<td><para>Searches for ports with a
<option>device_id</option> that
matches the Compute instance UUID. See
<xref
linkend="network_compute_note"
<td><para>Searches for ports with a <option>device_id</option> that matches
the Compute instance UUID. See <xref linkend="network_compute_note"
/>.</para></td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list --device_id=<replaceable>vm-id</replaceable></userinput></screen></td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list --device_id=<replaceable>VM_ID</replaceable></userinput></screen></td>
</tr>
<tr>
<td>Searches for ports, but shows only the
<option>mac_address</option> of the
port.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list --field mac_address --device_id=<replaceable>vm-id</replaceable></userinput></screen></td>
<td>Searches for ports, but shows only the <option>mac_address</option> of
the port.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-list --field mac_address --device_id=<replaceable>VM_ID</replaceable></userinput></screen></td>
</tr>
<tr>
<td>Temporarily disables a port from sending
traffic.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-update <replaceable>port-id</replaceable> --admin_state_up=False</userinput></screen></td>
<td>Temporarily disables a port from sending traffic.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-update <replaceable>PORT_ID</replaceable> --admin_state_up=False</userinput></screen></td>
</tr>
</tbody>
</table>
<note>
<para>The <option>device_id</option> can also be a
logical router ID.</para>
<para>The <option>device_id</option> can also be a logical router ID.</para>
</note>
<note xml:id="network_compute_note">
<title>Create and delete VMs</title>
<itemizedlist>
<listitem>
<para>When you boot a Compute VM, a port on
the network that corresponds to the VM NIC
is automatically created and associated
with the default security group. You can
configure <link
linkend="enabling_ping_and_ssh"
>security group rules</link> to enable
users to access the VM.</para>
<para>When you boot a Compute VM, a port on the network that corresponds to
the VM NIC is automatically created and associated with the default
security group. You can configure <link linkend="enabling_ping_and_ssh"
>security group rules</link> to enable users to access the
VM.</para>
</listitem>
<listitem>
<para>When you delete a Compute VM, the
underlying Networking port is
<para>When you delete a Compute VM, the underlying Networking port is
automatically deleted.</para>
</listitem>
</itemizedlist>
@@ -580,9 +493,8 @@
</section>
<section xml:id="advanced_vm_creation">
<title>Advanced VM creation operations</title>
<para>This table shows example nova and neutron commands
that enable you to complete advanced VM creation
operations:</para>
<para>This table shows example nova and neutron commands that enable you to complete
advanced VM creation operations:</para>
<table rules="all">
<caption>Advanced VM creation operations</caption>
<col width="40%"/>
@@ -596,46 +508,35 @@
<tbody>
<tr>
<td>Boots a VM with multiple NICs.</td>
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>img</replaceable> --flavor <replaceable>flavor</replaceable> --nic net-id=<replaceable>net1-id</replaceable> --nic net-id=<replaceable>net2-id</replaceable> <replaceable>vm-name</replaceable></userinput></screen></td>
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> --nic net-id=<replaceable>NET1-ID</replaceable> --nic net-id=<replaceable>NET2-ID</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
</tr>
<tr>
<td>Boots a VM with a specific IP address.
First, create a Networking port with a
specific IP address. Then, boot a VM
specifying a <option>port-id</option>
rather than a
<option>net-id</option>.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-create --fixed-ip subnet_id=<replaceable>subnet-id</replaceable>,ip_address=<replaceable>IP</replaceable> <replaceable>net-id</replaceable></userinput>
<prompt>$</prompt> <userinput>nova boot --image <replaceable>img</replaceable> --flavor <replaceable>flavor</replaceable> --nic port-id=<replaceable>port-id</replaceable> <replaceable>vm-name</replaceable></userinput></screen></td>
<td>Boots a VM with a specific IP address. First, create a Networking port
with a specific IP address. Then, boot a VM specifying a
<option>port-id</option> rather than a <option>net-id</option>.</td>
<td><screen><prompt>$</prompt> <userinput>neutron port-create --fixed-ip subnet_id=<replaceable>SUBNET_ID</replaceable>,ip_address=<replaceable>IP_ADDRESS</replaceable> <replaceable>NET_ID</replaceable></userinput>
<prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> --nic port-id=<replaceable>PORT_ID</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
</tr>
<tr>
<td>Boots a VM that connects to all networks
that are accessible to the tenant who
submits the request (without the
<parameter>--nic</parameter>
<td>Boots a VM that connects to all networks that are accessible to the
tenant who submits the request (without the <parameter>--nic</parameter>
option).</td>
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>img</replaceable> --flavor <replaceable>flavor</replaceable> <replaceable>vm-name</replaceable></userinput></screen></td>
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
</tr>
</tbody>
</table>
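<para>A concrete sketch of the two-step, fixed-address boot; the subnet ID, network
name, address, image, and flavor are hypothetical:</para>
<screen><prompt>$</prompt> <userinput>neutron port-create --fixed-ip subnet_id=<replaceable>SUBNET_ID</replaceable>,ip_address=10.0.0.5 net1</userinput>
<prompt>$</prompt> <userinput>nova boot --image cirros --flavor m1.tiny --nic port-id=<replaceable>PORT_ID</replaceable> vm1</userinput></screen>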
<note>
<para>Networking does not currently support the
<parameter>v4-fixed-ip</parameter> parameter
of the <parameter>--nic</parameter> option for the
<para>Networking does not currently support the <parameter>v4-fixed-ip</parameter>
parameter of the <parameter>--nic</parameter> option for the
<command>nova</command> command.</para>
</note>
<note>
<para>Cloud images that distribution vendors offer
usually have only one active NIC configured. When
you boot with multiple NICs, you must configure
additional interfaces on the image or the NICs are
not reachable.</para>
<para>The following Debian/Ubuntu-based example shows
how to set up the interfaces within the instance
in the
<filename>/etc/network/interfaces</filename>
file. You must apply this configuration to the
image.</para>
<para>Cloud images that distribution vendors offer usually have only one active NIC
configured. When you boot with multiple NICs, you must configure additional
interfaces on the image or the NICs are not reachable.</para>
<para>The following Debian/Ubuntu-based example shows how to set up the interfaces
within the instance in the <filename>/etc/network/interfaces</filename> file.
You must apply this configuration to the image.</para>
<programlisting language="bash"># The loopback network interface
auto lo
iface lo inet loopback
@@ -648,20 +549,15 @@ iface eth1 inet dhcp</programlisting>
</note>
</section>
<section xml:id="enabling_ping_and_ssh">
<title>Enable ping and SSH on VMs (security
groups)</title>
<para>You must configure security group rules depending on
the type of plug-in you are using. If you are using a
plug-in that:</para>
<title>Enable ping and SSH on VMs (security groups)</title>
<para>You must configure security group rules depending on the type of plug-in you are
using. If you are using a plug-in that:</para>
<itemizedlist>
<listitem>
<para>Implements Networking security groups, you
can configure security group rules directly by
using the <command>neutron
security-group-rule-create</command>
command. This example enables
<command>ping</command> and
<command>ssh</command> access to your
<para>Implements Networking security groups, you can configure security group
rules directly by using the <command>neutron
security-group-rule-create</command> command. This example enables
<command>ping</command> and <command>ssh</command> access to your
VMs.</para>
<screen><prompt>$</prompt> <userinput>neutron security-group-rule-create --protocol icmp \
--direction ingress default</userinput></screen>
@@ -669,27 +565,20 @@ iface eth1 inet dhcp</programlisting>
--port-range-max 22 --direction ingress default</userinput></screen>
</listitem>
<listitem>
<para>Does not implement Networking security
groups, you can configure security group rules
by using the <command>nova
secgroup-add-rule</command> or
<command>euca-authorize</command> command.
These <command>nova</command> commands enable
<command>ping</command> and
<command>ssh</command> access to your
VMs.</para>
<para>Does not implement Networking security groups, you can configure security
group rules by using the <command>nova secgroup-add-rule</command> or
<command>euca-authorize</command> command. These <command>nova</command>
commands enable <command>ping</command> and <command>ssh</command> access to
your VMs.</para>
<screen><prompt>$</prompt> <userinput>nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0</userinput>
<prompt>$</prompt> <userinput>nova secgroup-add-rule default tcp 22 22 0.0.0.0/0</userinput></screen>
</listitem>
</itemizedlist>
<note>
<para>If your plug-in implements Networking security
groups, you can also leverage Compute security
groups by setting <code>security_group_api =
neutron</code> in the
<filename>nova.conf</filename> file. After you
set this option, all Compute security group
commands are proxied to Networking.</para>
<para>If your plug-in implements Networking security groups, you can also leverage
Compute security groups by setting <code>security_group_api = neutron</code> in
the <filename>nova.conf</filename> file. After you set this option, all Compute
security group commands are proxied to Networking.</para>
</note>
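<para>To confirm that the rules were added, you can list them; a sketch, assuming a
plug-in that implements Networking security groups:</para>
<screen><prompt>$</prompt> <userinput>neutron security-group-rule-list</userinput></screen>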
</section>
</section>
@@ -1,9 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="section_networking-adv-features">
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="section_networking-adv-features">
<title>Advanced features through API extensions</title>
<para>Several plug-ins implement API extensions that provide capabilities similar to what was
available in nova-network. These plug-ins are likely to be of interest to the OpenStack
@@ -185,45 +183,43 @@
administrative role.</para>
<para>This list shows example neutron commands that enable you to complete basic
provider extension API operations:</para>
<itemizedlist>
<listitem>
<para>Shows all attributes of a network, including provider
attributes:</para>
<screen><prompt>$</prompt> <userinput>neutron net-show <replaceable>NAME_OR_NET_ID</replaceable></userinput></screen>
</listitem>
<listitem>
<para>Creates a local provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type local</userinput></screen>
</listitem>
<listitem>
<para>When you create flat networks,
<replaceable>PHYS_NET_NAME</replaceable> must be known to the plug-in. See the
<citetitle>OpenStack
Configuration Reference</citetitle> for details. Creates a flat provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type flat --provider:physical_network <replaceable>PHYS_NET_NAME</replaceable></userinput></screen>
</listitem>
<listitem>
<para>When you create VLAN networks,
<replaceable>PHYS_NET_NAME</replaceable> must be known to the plug-in. See the
<citetitle>OpenStack
Configuration Reference</citetitle> for details on configuring
network_vlan_ranges to identify all physical networks. When you
create VLAN networks, <replaceable>VID</replaceable> can fall either within or outside
any configured ranges of VLAN IDs from which tenant networks are
allocated. Creates a VLAN provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type vlan --provider:physical_network <replaceable>PHYS_NET_NAME</replaceable> --provider:segmentation_id <replaceable>VID</replaceable></userinput></screen>
</listitem>
<listitem>
<para>When you create GRE networks,
<replaceable>TUNNEL_ID</replaceable> can be either inside or outside any tunnel ID
ranges from which tenant networks are allocated.</para>
<para>After you create provider networks, you can allocate subnets,
which you can use in the same way as other virtual networks, subject
to authorization policy based on the specified
<replaceable>TENANT_ID</replaceable>. Creates a GRE provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type gre --provider:segmentation_id <replaceable>TUNNEL_ID</replaceable></userinput></screen>
</listitem>
</itemizedlist>
<itemizedlist>
<listitem>
<para>Shows all attributes of a network, including provider attributes:</para>
<screen><prompt>$</prompt> <userinput>neutron net-show <replaceable>NAME_OR_NET_ID</replaceable></userinput></screen>
</listitem>
<listitem>
<para>Creates a local provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type local</userinput></screen>
</listitem>
<listitem>
<para>When you create flat networks, <replaceable>PHYS_NET_NAME</replaceable>
must be known to the plug-in. See the <citetitle>OpenStack Configuration
Reference</citetitle> for details. Creates a flat provider
network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type flat --provider:physical_network <replaceable>PHYS_NET_NAME</replaceable></userinput></screen>
</listitem>
<listitem>
<para>When you create VLAN networks, <replaceable>PHYS_NET_NAME</replaceable>
must be known to the plug-in. See the <citetitle>OpenStack Configuration
Reference</citetitle> for details on configuring network_vlan_ranges to
identify all physical networks. When you create VLAN networks,
<replaceable>VID</replaceable> can fall either within or outside any
configured ranges of VLAN IDs from which tenant networks are allocated.
Creates a VLAN provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type vlan --provider:physical_network <replaceable>PHYS_NET_NAME</replaceable> --provider:segmentation_id <replaceable>VID</replaceable></userinput></screen>
</listitem>
<listitem>
<para>When you create GRE networks, <replaceable>TUNNEL_ID</replaceable> can be
either inside or outside any tunnel ID ranges from which tenant networks are
allocated.</para>
<para>After you create provider networks, you can allocate subnets, which you
can use in the same way as other virtual networks, subject to authorization
policy based on the specified <replaceable>TENANT_ID</replaceable>. Creates
a GRE provider network:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create <replaceable>NAME</replaceable> --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type gre --provider:segmentation_id <replaceable>TUNNEL_ID</replaceable></userinput></screen>
</listitem>
</itemizedlist>
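<para>For instance, a VLAN provider network might look like this; the physical network
label and VLAN ID are hypothetical and must match your plug-in configuration:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create public-vlan101 --tenant_id <replaceable>TENANT_ID</replaceable> --provider:network_type vlan --provider:physical_network physnet1 --provider:segmentation_id 101</userinput></screen>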
</section>
</section>
<section xml:id="section_l3_router_and_nat">
@@ -233,10 +229,9 @@
provides abstract L3 routers that API users can dynamically provision and configure.
These Networking routers can connect multiple L2 Networking networks and can also
provide a gateway that connects one or more private L2 networks to a shared external
network. For example, a public network for access to the
Internet. See the <citetitle>OpenStack Configuration
Reference</citetitle> for details on common models of
deploying Networking L3 routers.</para>
network. For example, a public network for access to the Internet. See the
<citetitle>OpenStack Configuration Reference</citetitle> for details on common
models of deploying Networking L3 routers.</para>
<para>The L3 router provides basic NAT capabilities on gateway ports that uplink the router
to external networks. This router SNATs all traffic by default and supports floating
IPs, which creates a static one-to-one mapping from a public IP on the external network
@@ -433,12 +428,12 @@
<td>
<screen><prompt>$</prompt> <userinput>neutron router-gateway-set router1 <replaceable>EXT_NET_ID</replaceable></userinput></screen>
<para>The router obtains an interface with the gateway_ip address of the
subnet and this interface is attached to a port on the L2
Networking network associated with the subnet. The router also gets
a gateway interface to the specified external network. This provides
SNAT connectivity to the external network as well as support for
floating IPs allocated on that external network. Commonly an
external network maps to a network in the provider</para>
subnet and this interface is attached to a port on the L2 Networking
network associated with the subnet. The router also gets a gateway
interface to the specified external network. This provides SNAT
connectivity to the external network as well as support for floating
IPs allocated on that external network. Commonly an external
network maps to a network in the provider</para>
</td>
</tr>
<tr>
@@ -468,8 +463,8 @@
</tr>
<tr>
<td>
<para>Identifies the <replaceable>PORT_ID</replaceable> that represents the VM
NIC to which the floating IP should map.</para>
<para>Identifies the <replaceable>PORT_ID</replaceable> that represents
the VM NIC to which the floating IP should map.</para>
</td>
<td>
<screen><prompt>$</prompt> <userinput>neutron port-list -c id -c fixed_ips -- --device_id=<replaceable>INSTANCE_ID</replaceable></userinput></screen>
@@ -821,36 +816,35 @@
<para>This list shows example neutron commands that enable you to complete basic LBaaS
operations:</para>
<itemizedlist>
<listitem>
<para>Creates a load balancer pool by using a specific provider.</para>
<para><parameter>--provider</parameter> is an optional argument. If not
used, the pool is created with the default provider for the LBaaS service. You
should configure the default provider in the
<literal>[service_providers]</literal> section of
the <filename>neutron.conf</filename> file. If no default provider is
specified for LBaaS, the <parameter>--provider</parameter> option is
required for pool creation.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-pool-create --lb-method ROUND_ROBIN --name mypool --protocol HTTP --subnet-id <replaceable>SUBNET_UUID</replaceable> <parameter>--provider <replaceable>PROVIDER_NAME</replaceable></parameter></userinput></screen>
</listitem>
<listitem>
<para>Associates two web servers with the pool.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-member-create --address <replaceable>WEBSERVER1_IP</replaceable> --protocol-port 80 mypool</userinput>
<listitem>
<para>Creates a load balancer pool by using a specific provider.</para>
<para><parameter>--provider</parameter> is an optional argument. If not used, the
pool is created with the default provider for the LBaaS service. You should configure
the default provider in the <literal>[service_providers]</literal> section of
the <filename>neutron.conf</filename> file. If no default provider is specified
for LBaaS, the <parameter>--provider</parameter> option is required for pool
creation.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-pool-create --lb-method ROUND_ROBIN --name mypool --protocol HTTP --subnet-id <replaceable>SUBNET_UUID</replaceable> <parameter>--provider <replaceable>PROVIDER_NAME</replaceable></parameter></userinput></screen>
</listitem>
<listitem>
<para>Associates two web servers with the pool.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-member-create --address <replaceable>WEBSERVER1_IP</replaceable> --protocol-port 80 mypool</userinput>
<prompt>$</prompt> <userinput>neutron lb-member-create --address <replaceable>WEBSERVER2_IP</replaceable> --protocol-port 80 mypool</userinput></screen>
</listitem>
<listitem>
<para>Creates a health monitor that checks to make sure our instances are
still running on the specified protocol-port.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-healthmonitor-create --delay 3 --type HTTP --max-retries 3 --timeout 3</userinput></screen>
</listitem>
<listitem>
<para>Associates a health monitor with the pool.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-healthmonitor-associate <replaceable>HEALTHMONITOR_UUID</replaceable> mypool</userinput></screen>
</listitem>
<listitem>
<para>Creates a virtual IP (VIP) address that, when accessed through the
load balancer, directs the requests to one of the pool members.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-id <replaceable>SUBNET_UUID</replaceable> mypool</userinput></screen>
</listitem>
</listitem>
<listitem>
<para>Creates a health monitor that checks to make sure our instances are still
running on the specified protocol-port.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-healthmonitor-create --delay 3 --type HTTP --max-retries 3 --timeout 3</userinput></screen>
</listitem>
<listitem>
<para>Associates a health monitor with the pool.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-healthmonitor-associate <replaceable>HEALTHMONITOR_UUID</replaceable> mypool</userinput></screen>
</listitem>
<listitem>
<para>Creates a virtual IP (VIP) address that, when accessed through the load
balancer, directs the requests to one of the pool members.</para>
<screen><prompt>$</prompt> <userinput>neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-id <replaceable>SUBNET_UUID</replaceable> mypool</userinput></screen>
</listitem>
</itemizedlist>
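<para>After the VIP is created, you can inspect the pool and its members to confirm the
configuration; a sketch using the pool name from the examples above:</para>
<screen><prompt>$</prompt> <userinput>neutron lb-pool-show mypool</userinput></screen>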
</section>
<?hard-pagebreak?>
@@ -1467,7 +1461,7 @@
<para>The <parameter>fields=status</parameter> parameter in Networking API requests
always triggers an explicit query to the NSX back end, even when you enable
asynchronous state synchronization. For example, <code>GET
/v2.0/networks/<replaceable>NET_ID</replaceable>?fields=status&fields=name</code>.</para>
/v2.0/networks/<replaceable>NET_ID</replaceable>?fields=status&fields=name</code>.</para>
</section>
</section>
<section xml:id="section_bigswitch_extensions">
@@ -365,12 +365,12 @@ interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>
<para>To do either of these things, you must run
the command within a particular network
namespace for the router. The namespace has
the name "qrouter-<UUID of the router>".
the name "qrouter-<replaceable>ROUTER_UUID</replaceable>".
These example commands run in the router
namespace with UUID
47af3868-0fa8-4447-85f6-1304de32153b:</para>
<screen><prompt>#</prompt> <userinput>ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ip addr list</userinput></screen>
<screen><prompt>#</prompt> <userinput>ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ping <fixed-ip></userinput></screen>
<screen><prompt>#</prompt> <userinput>ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ping <replaceable>FIXED_IP</replaceable></userinput></screen>
</step>
</procedure>
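<para>To find the router UUID that names the namespace, you can list the routers first;
a sketch:</para>
<screen><prompt>$</prompt> <userinput>neutron router-list -c id -c name</userinput></screen>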
<important>
@@ -506,9 +506,9 @@ interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>
</note>
</step>
<step>
<para>Enable Load Balancing in the
<para>Enable load balancing in the
<guimenu>Project</guimenu> section of the
dashboard:</para>
dashboard.</para>
<para>Change the <option>enable_lb</option> option
to <parameter>True</parameter> in the
<filename>/etc/openstack-dashboard/local_settings</filename>
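<para>The relevant setting is part of the <code>OPENSTACK_NEUTRON_NETWORK</code>
dictionary; a minimal sketch of the file after the change:</para>
<programlisting language="python">OPENSTACK_NEUTRON_NETWORK = {
    'enable_lb': True,
}</programlisting>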
@@ -1,49 +1,35 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="section_plugin-config">
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_plugin-config">
<title>Plug-in configurations</title>
<para>For configuration options, see <link
xlink:href="http://docs.openstack.org/icehouse/config-reference/content/section_networking-options-reference.html"
>Networking configuration options</link> in
<citetitle>Configuration Reference</citetitle>.
These sections explain how to configure specific
plug-ins.</para>
>Networking configuration options</link> in <citetitle>Configuration
Reference</citetitle>. These sections explain how to configure specific plug-ins.</para>
<section xml:id="bigswitch_floodlight_plugin">
<title>Configure Big Switch (Floodlight REST Proxy) plug-in</title>
<procedure>
<title>To use the REST Proxy plug-in with
OpenStack Networking</title>
<title>To use the REST proxy plug-in with OpenStack Networking</title>
<step>
<para>Edit the
<filename>/etc/neutron/neutron.conf</filename>
file and add this line:</para>
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and add this
line:</para>
<programlisting language="ini">core_plugin = bigswitch</programlisting>
</step>
<step>
<para>Edit the
<filename>/etc/neutron/plugins/bigswitch/restproxy.ini</filename>
file for the plug-in and specify a
comma-separated list of
<systemitem>controller_ip:port</systemitem>
pairs:</para>
<programlisting language="ini">server = <controller-ip>:<port></programlisting>
<para>Edit the <filename>/etc/neutron/plugins/bigswitch/restproxy.ini</filename>
file for the plug-in and specify a comma-separated list of
<systemitem>controller_ip:port</systemitem> pairs:</para>
<programlisting language="ini">server = <replaceable>CONTROLLER_IP</replaceable>:<replaceable>PORT</replaceable></programlisting>
<para>For database configuration, see <link
xlink:href="http://docs.openstack.org/icehouse/install-guide/install/apt/content/neutron-ml2-controller-node.html"
>Install Networking Services</link> in
the <citetitle>Installation
Guide</citetitle> in the <link
xlink:href="http://docs.openstack.org"
>OpenStack Documentation index</link>.
(The link defaults to the Ubuntu
>Install Networking Services</link> in the <citetitle>Installation
Guide</citetitle> in the <link xlink:href="http://docs.openstack.org"
>OpenStack Documentation index</link>. (The link defaults to the Ubuntu
version.)</para>
</step>
<step>
<para>Restart <systemitem class="service"
>neutron-server</systemitem> to apply
the settings:</para>
<para>Restart <systemitem class="service">neutron-server</systemitem> to apply the
settings:</para>
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
</step>
</procedure>
@@ -51,67 +37,52 @@
<section xml:id="brocade_plugin">
<title>Configure Brocade plug-in</title>
<procedure>
<title>To use the Brocade plug-in with OpenStack
Networking</title>
<title>To use the Brocade plug-in with OpenStack Networking</title>
<step>
<para>Install the Brocade-modified Python
netconf client (ncclient) library, which
is available at <link
xlink:href="https://github.com/brocade/ncclient"
<para>Install the Brocade-modified Python netconf client (ncclient) library, which
is available at <link xlink:href="https://github.com/brocade/ncclient"
>https://github.com/brocade/ncclient</link>:</para>
<screen><prompt>$</prompt> <userinput>git clone https://www.github.com/brocade/ncclient</userinput></screen>
<para>As <systemitem class="username"
>root</systemitem>, run this
command:</para>
<para>As <systemitem class="username">root</systemitem>, run this command:</para>
<screen><prompt>#</prompt> <userinput>cd ncclient;python setup.py install</userinput></screen>
</step>
<step>
<para>Edit the
<filename>/etc/neutron/neutron.conf</filename>
file and set the following option:</para>
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set the
following option:</para>
<programlisting language="ini">core_plugin = brocade</programlisting>
</step>
<step>
<para>Edit the
<filename>/etc/neutron/plugins/brocade/brocade.ini</filename>
file for the Brocade plug-in and specify
the admin user name, password, and IP
<para>Edit the <filename>/etc/neutron/plugins/brocade/brocade.ini</filename> file
for the Brocade plug-in and specify the admin user name, password, and IP
address of the Brocade switch:</para>
<programlisting language="ini">[SWITCH]
username = <replaceable>admin</replaceable>
password = <replaceable>password</replaceable>
address = <replaceable>switch mgmt ip address</replaceable>
username = <replaceable>ADMIN</replaceable>
password = <replaceable>PASSWORD</replaceable>
address = <replaceable>SWITCH_MGMT_IP_ADDRESS</replaceable>
ostype = NOS</programlisting>
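<para>A filled-in sketch; the credentials and switch address are hypothetical:</para>
<programlisting language="ini">[SWITCH]
username = admin
password = password
address = 10.24.84.38
ostype = NOS</programlisting>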
<para>For database configuration, see <link
xlink:href="http://docs.openstack.org/icehouse/install-guide/install/apt/content/neutron-ml2-controller-node.html"
>Install Networking Services</link> in
any of the <citetitle>Installation
Guides</citetitle> in the <link
xlink:href="http://docs.openstack.org"
>OpenStack Documentation index</link>.
(The link defaults to the Ubuntu
>Install Networking Services</link> in any of the <citetitle>Installation
Guides</citetitle> in the <link xlink:href="http://docs.openstack.org"
>OpenStack Documentation index</link>. (The link defaults to the Ubuntu
version.)</para>
</step>
<step>
<para>Restart the <systemitem class="service"
>neutron-server</systemitem> service
to apply the settings:</para>
<para>Restart the <systemitem class="service">neutron-server</systemitem> service to
apply the settings:</para>
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
</step>
</procedure>
</section>
<section xml:id="openvswitch_plugin">
<title>Configure OVS plug-in</title>
<para>If you use the Open vSwitch (OVS) plug-in in a
deployment with multiple hosts, you must use
either tunneling or VLANs to isolate traffic from
multiple networks. Tunneling is easier to deploy
because it does not require that you configure
VLANs on network switches.</para>
<para>If you use the Open vSwitch (OVS) plug-in in a deployment with multiple hosts, you
must use either tunneling or VLANs to isolate traffic from multiple networks. Tunneling
is easier to deploy because it does not require that you configure VLANs on network
switches.</para>
<para>This procedure uses tunneling:</para>
<procedure>
<title>To configure OpenStack Networking to use
the OVS plug-in</title>
<title>To configure OpenStack Networking to use the OVS plug-in</title>
<step>
<para>Edit
<filename>/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini</filename>
@@ -120,35 +91,29 @@ ostype = NOS</programlisting>
tenant_network_type=gre
tunnel_id_ranges=1:1000
# only required for nodes running agents
local_ip=<data-net-IP-address-of-node></programlisting>
local_ip=<replaceable>DATA_NET_IP_NODE_ADDRESS</replaceable></programlisting>
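<para>In context, the tunneling settings might look like the following sketch; the
<literal>[ovs]</literal> section name, the <option>enable_tunneling</option> line,
and the IP address are assumptions based on a typical GRE setup and are not part of
the excerpt above:</para>
<programlisting language="ini">[ovs]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
enable_tunneling = True
local_ip = 10.0.1.10</programlisting>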
<para>For database configuration, see <link
xlink:href="http://docs.openstack.org/icehouse/install-guide/install/apt/content/neutron-ml2-controller-node.html"
>Install Networking Services</link> in
<citetitle>Installation
>Install Networking Services</link> in the <citetitle>Installation
Guide</citetitle>.</para>
</step>
<step>
<para>If you use the neutron DHCP agent, add
these lines to the
<filename>/etc/neutron/dhcp_agent.ini</filename>
file:</para>
<para>If you use the neutron DHCP agent, add these lines to the
<filename>/etc/neutron/dhcp_agent.ini</filename> file:</para>
<programlisting language="ini">dnsmasq_config_file=/etc/neutron/dnsmasq/dnsmasq-neutron.conf</programlisting>
<para>Restart the DHCP service to apply the
settings:</para>
<para>Restart the DHCP service to apply the settings:</para>
<screen><prompt>#</prompt> <userinput>service neutron-dhcp-agent restart</userinput></screen>
</step>
<step>
<para>To lower the MTU size on instances and
prevent packet fragmentation over the GRE
tunnel, create the
<filename>/etc/neutron/dnsmasq/dnsmasq-neutron.conf</filename>
file and add these values:</para>
<para>To lower the MTU size on instances and prevent packet fragmentation over the
GRE tunnel, create the
<filename>/etc/neutron/dnsmasq/dnsmasq-neutron.conf</filename> file and add
these values:</para>
<programlisting language="ini">dhcp-option-force=26,1400</programlisting>
</step>
<step>
<para>Restart the <systemitem
class="service">neutron-server</systemitem>
service to apply the settings:</para>
<para>Restart the <systemitem class="service">neutron-server</systemitem> service to
apply the settings:</para>
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
</step>
</procedure>
@@ -156,110 +121,81 @@ local_ip=<data-net-IP-address-of-node></programlisting>
<section xml:id="nsx_plugin">
<title>Configure NSX plug-in</title>
<procedure>
<title>To configure OpenStack Networking to use
the NSX plug-in</title>
<para>The instructions in this section refer
to the VMware NSX platform, which was formerly
known as Nicira NVP.</para>
<title>To configure OpenStack Networking to use the NSX plug-in</title>
<para>The instructions in this section refer to the VMware NSX platform, which was
formerly known as Nicira NVP.</para>
<step>
<para>Install the NSX plug-in:</para>
<screen><prompt>#</prompt> <userinput>apt-get install neutron-plugin-vmware</userinput></screen>
</step>
<step>
<para>Edit the
<filename>/etc/neutron/neutron.conf</filename>
file and set this line:</para>
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set this
line:</para>
<programlisting language="ini">core_plugin = vmware</programlisting>
<para>Example
<filename>neutron.conf</filename> file
for NSX:</para>
<para>Example <filename>neutron.conf</filename> file for NSX:</para>
<programlisting language="ini">core_plugin = vmware
rabbit_host = 192.168.203.10
allow_overlapping_ips = True</programlisting>
</step>
<step>
<para>To configure the NSX controller cluster
for OpenStack Networking, locate the
<literal>[default]</literal> section
in the
<filename>/etc/neutron/plugins/vmware/nsx.ini</filename>
file and add the following entries:</para>
<para>To configure the NSX controller cluster for OpenStack Networking, locate the
<literal>[default]</literal> section in the
<filename>/etc/neutron/plugins/vmware/nsx.ini</filename> file and add the
following entries:</para>
<itemizedlist>
<listitem>
<para>To establish and configure the
connection with the controller
cluster you must set some
parameters, including NSX API
endpoints, access credentials, and
settings for HTTP redirects and
retries in case of connection
failures:</para>
<programlisting language="ini">nsx_user = <admin user name>
nsx_password = <password for nsx_user>
req_timeout = <timeout in seconds for NSX_requests> # default 30 seconds
http_timeout = <timeout in seconds for single HTTP request> # default 10 seconds
retries = <number of HTTP request retries> # default 2
redirects = <maximum allowed redirects for a HTTP request> # default 3
nsx_controllers = <comma separated list of API endpoints></programlisting>
<para>To ensure correct operations,
the <literal>nsx_user</literal>
user must have administrator
credentials on the NSX
platform.</para>
<para>A controller API endpoint
consists of the IP address and port
for the controller; if you omit the
port, port 443 is used. If multiple
API endpoints are specified, it is
up to the user to ensure that all
these endpoints belong to the same
controller cluster. The OpenStack
Networking VMware NSX plug-in does
not perform this check, and results
might be unpredictable.</para>
<para>When you specify multiple API
endpoints, the plug-in
load-balances requests on the
various API endpoints.</para>
<para>To establish and configure the connection with the controller cluster
you must set some parameters, including NSX API endpoints, access
credentials, and settings for HTTP redirects and retries in case of
connection failures:</para>
<programlisting language="ini">nsx_user = <replaceable>ADMIN_USER_NAME</replaceable>
nsx_password = <replaceable>NSX_USER_PASSWORD</replaceable>
req_timeout = <replaceable>NSX_REQUEST_TIMEOUT</replaceable> # (seconds) default 30 seconds
http_timeout = <replaceable>HTTP_REQUEST_TIMEOUT</replaceable> # (seconds) default 10 seconds
retries = <replaceable>HTTP_REQUEST_RETRIES</replaceable> # default 2
redirects = <replaceable>HTTP_REQUEST_MAX_REDIRECTS</replaceable> # default 3
nsx_controllers = <replaceable>API_ENDPOINT_LIST</replaceable> # comma-separated list</programlisting>
|
||||
<para>To ensure correct operations, the <literal>nsx_user</literal> user
|
||||
must have administrator credentials on the NSX platform.</para>
|
||||
<para>A controller API endpoint consists of the IP address and port for the
|
||||
controller; if you omit the port, port 443 is used. If multiple API
|
||||
endpoints are specified, it is up to the user to ensure that all these
|
||||
endpoints belong to the same controller cluster. The OpenStack
|
||||
Networking VMware NSX plug-in does not perform this check, and results
|
||||
might be unpredictable.</para>
|
||||
<para>When you specify multiple API endpoints, the plug-in load-balances
|
||||
requests on the various API endpoints.</para>
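            <para>For illustration, a minimal connection block might look like this
                sketch; the credentials and endpoint addresses are sample values that
                match the example <filename>nsx.ini</filename> file later in this
                section and must be replaced with your own:</para>
            <programlisting language="ini">nsx_user = admin
nsx_password = changeme
nsx_controllers = 10.127.0.100,10.127.0.200:8888</programlisting>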
        </listitem>
        <listitem>
            <para>The UUID of the NSX transport zone that should be used by default when
                a tenant creates a network. You can get this value from the
                <guilabel>Transport Zones</guilabel> page for the NSX Manager:</para>
            <programlisting language="ini">default_tz_uuid = <replaceable>TRANSPORT_ZONE_UUID</replaceable></programlisting>
        </listitem>
        <listitem>
            <programlisting language="ini">default_l3_gw_service_uuid = <replaceable>GATEWAY_SERVICE_UUID</replaceable></programlisting>
            <warning>
                <para>Ubuntu packaging currently does not update the Neutron init script
                    to point to the NSX configuration file. Instead, you must manually
                    update <filename>/etc/default/neutron-server</filename> to add this
                    line:</para>
                <programlisting language="ini">NEUTRON_PLUGIN_CONFIG = /etc/neutron/plugins/vmware/nsx.ini</programlisting>
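                <para>As a quick sanity check (a suggested step, not required by the
                    packaging), confirm that the variable is set before you restart the
                    service:</para>
                <screen><prompt>#</prompt> <userinput>grep NEUTRON_PLUGIN_CONFIG /etc/default/neutron-server</userinput></screen>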
            </warning>
        </listitem>
    </itemizedlist>
    <para>For database configuration, see <link
        xlink:href="http://docs.openstack.org/icehouse/install-guide/install/apt/content/neutron-ml2-controller-node.html"
        >Install Networking Services</link> in the <citetitle>Installation
        Guide</citetitle>.</para>
</step>
<step>
    <para>Restart <systemitem class="service">neutron-server</systemitem> to apply
        settings:</para>
    <screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
</step>
</procedure>
<para>Example <filename>nsx.ini</filename> file:</para>
<programlisting language="ini">[DEFAULT]
default_tz_uuid = d3afb164-b263-4aaa-a3e4-48e0e09bb33c
default_l3_gw_service_uuid=5c8622cc-240a-40a1-9693-e6a5fca4e3cf
@ -267,85 +203,58 @@ nsx_user=admin
nsx_password=changeme
nsx_controllers=10.127.0.100,10.127.0.200:8888</programlisting>
<note>
    <para>To debug <filename>nsx.ini</filename> configuration issues, run this command from
        the host that runs <systemitem class="service">neutron-server</systemitem>:</para>
    <screen><prompt>#</prompt> <userinput>neutron-check-nsx-config <replaceable>PATH_TO_NSX.INI</replaceable></userinput></screen>
    <para>This command tests whether <systemitem class="service">neutron-server</systemitem>
        can log into all of the NSX Controllers and the SQL server, and whether all UUID
        values are correct.</para>
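    <para>For example, with the configuration file path used in this section:</para>
    <screen><prompt>#</prompt> <userinput>neutron-check-nsx-config /etc/neutron/plugins/vmware/nsx.ini</userinput></screen>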
</note>
<section xml:id="LBaaS_and_FWaaS">
    <title>Load-Balancer-as-a-Service and Firewall-as-a-Service</title>
    <para>The NSX LBaaS and FWaaS services use the standard OpenStack API with the exception
        of requiring routed-insertion extension support.</para>
    <para>The NSX implementation and the community reference implementation of these
        services differ, as follows:</para>
    <orderedlist>
        <listitem>
            <para>The NSX LBaaS and FWaaS plug-ins require the routed-insertion extension,
                which adds the <code>router_id</code> attribute to the VIP (Virtual IP
                address) and firewall resources and binds these services to a logical
                router.</para>
        </listitem>
        <listitem>
            <para>The community reference implementation of LBaaS only supports a one-arm
                model, which restricts the VIP to be on the same subnet as the back-end
                servers. The NSX LBaaS plug-in only supports a two-arm model between
                north-south traffic, which means that you can create the VIP on only the
                external (physical) network. A sketch of this workflow follows this
                list.</para>
        </listitem>
        <listitem>
            <para>The community reference implementation of FWaaS applies firewall rules to
                all logical routers in a tenant, while the NSX FWaaS plug-in applies
                firewall rules only to one logical router according to the
                <code>router_id</code> of the firewall entity.</para>
        </listitem>
    </orderedlist>
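    <para>For illustration only, creating a two-arm VIP with the LBaaS v1 CLI might look
        like the following sketch; the pool and VIP names are hypothetical, and you must
        substitute the subnet IDs for your deployment:</para>
    <screen><prompt>$</prompt> <userinput>neutron lb-pool-create --name mypool --lb-method ROUND_ROBIN --protocol HTTP --subnet-id <replaceable>BACKEND_SUBNET_ID</replaceable></userinput>
<prompt>$</prompt> <userinput>neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-id <replaceable>EXTERNAL_SUBNET_ID</replaceable> mypool</userinput></screen>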
    <procedure>
        <title>To configure Load-Balancer-as-a-Service and Firewall-as-a-Service with
            NSX</title>
        <step>
            <para>Edit the <filename>/etc/neutron/neutron.conf</filename> file:</para>
            <programlisting language="ini">core_plugin = neutron.plugins.vmware.plugin.NsxServicePlugin
# Note: comment out service_plugins. LBaaS and FWaaS are supported by core_plugin NsxServicePlugin
# service_plugins = </programlisting>
        </step>
        <step>
            <para>Edit the <filename>/etc/neutron/plugins/vmware/nsx.ini</filename>
                file:</para>
            <para>In addition to the original NSX configuration, the
                <code>default_l3_gw_service_uuid</code> is required for the NSX Advanced
                plug-in and you must add a <code>vcns</code> section:</para>
            <programlisting language="ini">[DEFAULT]
nsx_password = <replaceable>ADMIN</replaceable>
nsx_user = <replaceable>ADMIN</replaceable>
nsx_controllers = <replaceable>10.37.1.137:443</replaceable>
default_l3_gw_service_uuid = <replaceable>aae63e9b-2e4e-4efe-81a1-92cf32e308bf</replaceable>
default_tz_uuid = <replaceable>2702f27a-869a-49d1-8781-09331a0f6b9e</replaceable>
@ -355,10 +264,10 @@ default_tz_uuid = <replaceable>2702f27a-869a-49d1-8781-09331a0f6b9e</replaceable
manager_uri = <replaceable>https://10.24.106.219</replaceable>

# VSM admin user name
user = <replaceable>ADMIN</replaceable>

# VSM admin password
password = <replaceable>DEFAULT</replaceable>

# UUID of a logical switch on NSX which has physical network connectivity (currently using bridge transport type)
external_network = <replaceable>f2c023cf-76e2-4625-869b-d0dabcfcc638</replaceable>
@ -370,8 +279,7 @@ external_network = <replaceable>f2c023cf-76e2-4625-869b-d0dabcfcc638</replaceabl
# task_status_check_interval =</programlisting>
        </step>
        <step>
            <para>Restart the <systemitem class="service">neutron-server</systemitem>
                service to apply the settings:</para>
            <screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
        </step>
@ -381,22 +289,17 @@ external_network = <replaceable>f2c023cf-76e2-4625-869b-d0dabcfcc638</replaceabl
<section xml:id="PLUMgridplugin">
    <title>Configure PLUMgrid plug-in</title>
    <procedure>
        <title>To use the PLUMgrid plug-in with OpenStack Networking</title>
        <step>
            <para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set this
                line:</para>
            <programlisting language="ini">core_plugin = plumgrid</programlisting>
        </step>
        <step>
            <para>Edit the <systemitem>[PLUMgridDirector]</systemitem> section in the
                <filename>/etc/neutron/plugins/plumgrid/plumgrid.ini</filename> file and
                specify the IP address, port, admin user name, and password of the PLUMgrid
                Director:</para>
            <programlisting language="ini">[PLUMgridDirector]
director_server = "PLUMgrid-director-ip-address"
director_server_port = "PLUMgrid-director-port"
@ -404,14 +307,12 @@ username = "PLUMgrid-director-admin-username"
password = "PLUMgrid-director-admin-password"</programlisting>
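            <para>A populated version is sketched below; the address, port, and
                credentials are assumed sample values for illustration only:</para>
            <programlisting language="ini">[PLUMgridDirector]
director_server = "192.168.10.10"
director_server_port = "8080"
username = "plumgrid"
password = "plumgrid"</programlisting>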
            <para>For database configuration, see <link
                xlink:href="http://docs.openstack.org/icehouse/install-guide/install/apt/content/neutron-ml2-controller-node.html"
                >Install Networking Services</link> in the <citetitle>Installation
                Guide</citetitle>.</para>
        </step>
        <step>
            <para>Restart the <systemitem class="service">neutron-server</systemitem> service to
                apply the settings:</para>
            <screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
        </step>
    </procedure>
@ -419,74 +320,52 @@ password = "PLUMgrid-director-admin-password"</programlisting>
<section xml:id="ryu_plugin">
    <title>Configure Ryu plug-in</title>
    <procedure>
        <title>To use the Ryu plug-in with OpenStack Networking</title>
        <step>
            <para>Install the Ryu plug-in:</para>
            <screen><prompt>#</prompt> <userinput>apt-get install neutron-plugin-ryu</userinput></screen>
        </step>
        <step>
            <para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set this
                line:</para>
            <programlisting language="ini">core_plugin = ryu</programlisting>
        </step>
        <step>
            <para>Edit the <filename>/etc/neutron/plugins/ryu/ryu.ini</filename> file and update
                these options in the <literal>[ovs]</literal> section for the <systemitem
                class="service">ryu-neutron-agent</systemitem>:</para>
            <itemizedlist>
                <listitem>
                    <para><option>openflow_rest_api</option>. Defines where Ryu listens for the
                        REST API. Substitute <replaceable>IP_ADDRESS</replaceable> and
                        <replaceable>PORT</replaceable> based on your Ryu setup.</para>
                </listitem>
                <listitem>
                    <para><option>ovsdb_interface</option>. Enables Ryu to access the
                        <systemitem>ovsdb-server</systemitem>. Substitute
                        <literal>eth0</literal> based on your setup. The IP address is
                        derived from the interface name. If you want to change this value
                        irrespective of the interface name, you can specify
                        <option>ovsdb_ip</option>. If you use a non-default port for
                        <systemitem>ovsdb-server</systemitem>, you can specify
                        <option>ovsdb_port</option>.</para>
                </listitem>
                <listitem>
                    <para><option>tunnel_interface</option>. Defines which IP address is used
                        for tunneling. If you do not use tunneling, this value is ignored. The
                        IP address is derived from the network interface name.</para>
                </listitem>
            </itemizedlist>
            <para>For database configuration, see <link
                xlink:href="http://docs.openstack.org/icehouse/install-guide/install/apt/content/neutron-ml2-controller-node.html"
                >Install Networking Services</link> in the <citetitle>Installation
                Guide</citetitle>.</para>
            <para>You can use the same configuration file for many compute nodes by using a
                network interface name with a different IP address:</para>
            <programlisting language="ini">openflow_rest_api = <replaceable>IP_ADDRESS</replaceable>:<replaceable>PORT</replaceable>
ovsdb_interface = <replaceable>ETH0</replaceable>
tunnel_interface = <replaceable>ETH0</replaceable></programlisting>
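            <para>For illustration, a populated version might look like this sketch; the
                controller address and interface name are assumed values, with the Ryu
                REST API assumed to listen on its default port 8080:</para>
            <programlisting language="ini">openflow_rest_api = 192.168.0.20:8080
ovsdb_interface = eth0
tunnel_interface = eth0</programlisting>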
</step>
        <step>
            <para>Restart the <systemitem class="service">neutron-server</systemitem>
                service to apply the settings:</para>
            <screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
        </step>
@ -103,7 +103,7 @@
</varlistentry>
<varlistentry>
    <term><code>ssl_cert_file =
        <replaceable>PATH_TO_CERTFILE</replaceable></code></term>
    <listitem>
        <para>Certificate file that is used when you
            securely start the Networking API
@ -112,7 +112,7 @@
</varlistentry>
<varlistentry>
    <term><code>ssl_key_file =
        <replaceable>PATH_TO_KEYFILE</replaceable></code></term>
    <listitem>
        <para>Private key file that is used when you
            securely start the Networking API
@ -121,7 +121,7 @@
</varlistentry>
<varlistentry>
    <term><code>ssl_ca_file =
        <replaceable>PATH_TO_CAFILE</replaceable></code></term>
    <listitem>
        <para>Optional. CA certificate file that is used
            when you securely start the Networking API
@ -302,16 +302,16 @@ enabled = True</programlisting>
    the policy.</para>
<step>
    <para>Create a firewall rule:</para>
    <screen><prompt>$</prompt> <userinput>neutron firewall-rule-create --protocol {tcp|udp|icmp|any} --destination-port <replaceable>PORT_RANGE</replaceable> --action {allow|deny}</userinput></screen>
    <para>The Networking client requires a protocol value;
        if the rule is protocol agnostic, you can use the
        <literal>any</literal> value.</para>
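    <para>For example, a rule that allows TCP traffic to port 80 (an illustrative
        sketch):</para>
    <screen><prompt>$</prompt> <userinput>neutron firewall-rule-create --protocol tcp --destination-port 80 --action allow</userinput></screen>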
</step>
<step>
    <para>Create a firewall policy:</para>
    <screen><prompt>$</prompt> <userinput>neutron firewall-policy-create --firewall-rules "<replaceable>FIREWALL_RULE_IDS_OR_NAMES</replaceable>" myfirewallpolicy</userinput></screen>
    <para>Separate firewall rule IDs or names with spaces. The order in which you
        specify the rules is important.</para>
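    <para>For example, assuming two existing rules with the hypothetical names
        <literal>allow-http</literal> and <literal>deny-all</literal>, evaluated in
        that order:</para>
    <screen><prompt>$</prompt> <userinput>neutron firewall-policy-create --firewall-rules "allow-http deny-all" myfirewallpolicy</userinput></screen>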
    <para>You can create a firewall policy without any
        rules and add rules later, as follows:<itemizedlist>
            <listitem>
@ -337,7 +337,7 @@ enabled = True</programlisting>
</step>
<step>
    <para>Create a firewall:</para>
    <screen><prompt>$</prompt> <userinput>neutron firewall-create <replaceable>FIREWALL_POLICY_UUID</replaceable></userinput></screen>
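    <para>Continuing the example above, the client typically also accepts the policy
        name in place of its UUID:</para>
    <screen><prompt>$</prompt> <userinput>neutron firewall-create myfirewallpolicy</userinput></screen>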
    <note>
        <para>The firewall remains in
            <guilabel>PENDING_CREATE</guilabel> state
@ -367,12 +367,12 @@ enabled = True</programlisting>
<listitem>
    <para>Create a port with specified allowed address pairs, as shown in the
        sketch after this list:</para>
    <screen><prompt>$</prompt> <userinput>neutron port-create net1 --allowed-address-pairs type=dict list=true mac_address=<replaceable>MAC_ADDRESS</replaceable>,ip_address=<replaceable>IP_CIDR</replaceable></userinput></screen>
</listitem>
<listitem>
    <para>Update a port by adding allowed address
        pairs:</para>
    <screen><prompt>$</prompt> <userinput>neutron port-update <replaceable>PORT_UUID</replaceable> --allowed-address-pairs type=dict list=true mac_address=<replaceable>MAC_ADDRESS</replaceable>,ip_address=<replaceable>IP_CIDR</replaceable></userinput></screen>
</listitem>
</itemizedlist></para>
</formalpara>
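<para>For illustration, a populated invocation might look like the following sketch;
    the MAC address and CIDR are assumed sample values:</para>
<screen><prompt>$</prompt> <userinput>neutron port-create net1 --allowed-address-pairs type=dict list=true mac_address=fa:16:3e:11:22:33,ip_address=10.0.0.0/24</userinput></screen>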
@ -47,7 +47,7 @@
        <plugin>
            <groupId>com.rackspace.cloud.api</groupId>
            <artifactId>clouddocs-maven-plugin</artifactId>
            <version>2.1.2</version>
        </plugin>
    </plugins>
</build>