<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
  xmlns:xi="http://www.w3.org/2001/XInclude"
  xmlns:xlink="http://www.w3.org/1999/xlink"
  version="5.0"
  xml:id="nova-compute-install">
  <title>Install and configure a compute node</title>
  <para>This section describes how to install and configure the Compute
    service on a compute node. The service supports several
    <glossterm baseform="hypervisor">hypervisors</glossterm> to
    deploy <glossterm baseform="instance">instances</glossterm> or
    <glossterm baseform="virtual machine (VM)">VMs</glossterm>. For simplicity,
    this configuration uses the
    <glossterm baseform="Quick EMUlator (QEMU)">QEMU</glossterm> hypervisor
    with the
    <glossterm baseform="kernel-based VM (KVM)">KVM</glossterm> extension
    on compute nodes that support hardware acceleration for virtual machines.
    On legacy hardware, this configuration uses the generic QEMU hypervisor.
    You can follow these instructions with minor modifications to horizontally
    scale your environment with additional compute nodes.</para>
  <note>
    <para>This section assumes that you are following the instructions in
      this guide step-by-step to configure the first compute node. If you
      want to configure additional compute nodes, prepare them in a similar
      fashion to the first compute node in the
      <link linkend="architecture_example-architectures">example architectures
      </link> section using the same networking service as your existing
      environment. For either networking service, follow the
      <link linkend="basics-ntp-other-nodes">NTP configuration</link> and
      <link linkend="basics-packages">OpenStack packages</link>
      instructions. For OpenStack Networking (neutron), also follow the
      <link linkend="basics-networking-neutron">OpenStack
      Networking compute node</link> instructions. For legacy networking
      (nova-network), also follow the
      <link linkend="basics-networking-nova">legacy networking compute node</link>
      instructions. Each additional compute node requires unique IP
      addresses.</para>
  </note>
  <procedure os="ubuntu;rhel;centos;fedora;sles;opensuse">
    <title>To install and configure the Compute hypervisor components</title>
    <step>
      <para>Install the packages:</para>
      <screen os="ubuntu"><prompt>#</prompt> <userinput>apt-get install nova-compute sysfsutils</userinput></screen>
      <screen os="rhel;centos;fedora"><prompt>#</prompt> <userinput>yum install openstack-nova-compute sysfsutils</userinput></screen>
      <screen os="sles;opensuse"><prompt>#</prompt> <userinput>zypper install openstack-nova-compute genisoimage kvm</userinput></screen>
    </step>
    <step>
      <para>Edit the <filename>/etc/nova/nova.conf</filename> file and
        complete the following actions:</para>
      <substeps>
        <step>
          <para>In the <literal>[DEFAULT]</literal> section, configure
            <application>RabbitMQ</application> message broker access:</para>
          <programlisting language="ini">[DEFAULT]
...
rpc_backend = rabbit
rabbit_host = <replaceable>controller</replaceable>
rabbit_password = <replaceable>RABBIT_PASS</replaceable></programlisting>
          <para>Replace <replaceable>RABBIT_PASS</replaceable> with the password
            you chose for the <literal>guest</literal> account in
            <application>RabbitMQ</application>.</para>
        </step>
        <step>
          <para>In the <literal>[DEFAULT]</literal> and
            <literal>[keystone_authtoken]</literal> sections,
            configure Identity service access:</para>
          <programlisting language="ini">[DEFAULT]
...
auth_strategy = keystone

[keystone_authtoken]
...
auth_uri = http://<replaceable>controller</replaceable>:5000/v2.0
identity_uri = http://<replaceable>controller</replaceable>:35357
admin_tenant_name = service
admin_user = nova
admin_password = <replaceable>NOVA_PASS</replaceable></programlisting>
          <para>Replace <replaceable>NOVA_PASS</replaceable> with the password
            you chose for the <literal>nova</literal> user in the Identity
            service.</para>
          <note>
            <para>Comment out any <literal>auth_host</literal>,
              <literal>auth_port</literal>, and
              <literal>auth_protocol</literal> options because the
<literal>identity_uri</literal> option replaces them.</para>
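            <para>For example, if your existing configuration contains lines
              similar to the following, prefix them with a
              <literal>#</literal> character. The values shown here are only
              illustrative:</para>
            <programlisting language="ini">[keystone_authtoken]
...
# example values only
#auth_host = <replaceable>controller</replaceable>
#auth_port = 35357
#auth_protocol = http</programlisting>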
          </note>
        </step>
        <step>
          <para>In the <literal>[DEFAULT]</literal> section, configure the
            <literal>my_ip</literal> option:</para>
          <programlisting language="ini">[DEFAULT]
...
my_ip = <replaceable>MANAGEMENT_INTERFACE_IP_ADDRESS</replaceable></programlisting>
          <para>Replace
            <replaceable>MANAGEMENT_INTERFACE_IP_ADDRESS</replaceable> with
            the IP address of the management network interface on your
            compute node, typically 10.0.0.31 for the first node in the
            <link linkend="architecture_example-architectures">example
architecture</link>.</para>
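          <para>For example, on the first compute node in the example
            architecture, the option would typically read:</para>
          <programlisting language="ini">[DEFAULT]
...
my_ip = 10.0.0.31</programlisting>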
        </step>
        <step>
          <para>In the <literal>[DEFAULT]</literal> section, enable and
            configure remote console access:</para>
          <programlisting language="ini">[DEFAULT]
...
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = <replaceable>MANAGEMENT_INTERFACE_IP_ADDRESS</replaceable>
novncproxy_base_url = http://<replaceable>controller</replaceable>:6080/vnc_auto.html</programlisting>
          <para>The server component listens on all IP addresses and the proxy
            component only listens on the management interface IP address of
            the compute node. The base URL indicates the location where you
            can use a web browser to access remote consoles of instances
            on this compute node.</para>
          <para>Replace
            <replaceable>MANAGEMENT_INTERFACE_IP_ADDRESS</replaceable> with
            the IP address of the management network interface on your
            compute node, typically 10.0.0.31 for the first node in the
            <link linkend="architecture_example-architectures">example
            architecture</link>.</para>
          <note>
            <para>If the web browser that you use to access remote consoles
              runs on a host that cannot resolve the
              <replaceable>controller</replaceable> hostname, you must replace
              <replaceable>controller</replaceable> with the management
interface IP address of the controller node.</para>
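            <para>For example, with the controller management address used in
              the example architecture, the option would read as follows;
              substitute the address of your own controller node:</para>
            <programlisting language="ini"># example value; use your controller's management address
novncproxy_base_url = http://10.0.0.11:6080/vnc_auto.html</programlisting>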
          </note>
        </step>
        <step>
          <para>In the <literal>[glance]</literal> section, configure the
            location of the Image Service:</para>
          <programlisting language="ini">[glance]
...
host = <replaceable>controller</replaceable></programlisting>
        </step>
        <step>
          <para>(Optional) To assist with troubleshooting, enable verbose
            logging in the <literal>[DEFAULT]</literal> section:</para>
          <programlisting language="ini">[DEFAULT]
...
verbose = True</programlisting>
        </step>
      </substeps>
    </step>
    <step os="opensuse;sles">
      <substeps>
        <step>
          <para>Ensure the kernel module <literal>nbd</literal> is
            loaded.</para>
          <screen><prompt>#</prompt> <userinput>modprobe nbd</userinput></screen>
        </step>
        <step>
          <para>Ensure the module loads on every boot.</para>
          <para>On openSUSE, add <literal>nbd</literal> to the
<filename>/etc/modules-load.d/nbd.conf</filename> file.</para>
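          <para>A minimal version of this file typically contains only the
            module name:</para>
          <programlisting>nbd</programlisting>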
          <para>On SLES, add or modify the following line in the
            <filename>/etc/sysconfig/kernel</filename> file:</para>
          <programlisting language="ini">MODULES_LOADED_ON_BOOT="nbd"</programlisting>
        </step>
      </substeps>
    </step>
  </procedure>
  <procedure os="debian">
    <title>To install and configure the Compute hypervisor components</title>
    <step>
      <para>Install the packages:</para>
      <screen><prompt>#</prompt> <userinput>apt-get install nova-compute</userinput></screen>
    </step>
    <step>
      <para>Respond to the prompts for
        <link linkend="debconf-dbconfig-common">database management</link>,
        <link linkend="debconf-keystone_authtoken">Identity service
        credentials</link>,
        <link linkend="debconf-api-endpoints">service endpoint
        registration</link>, and
        <link linkend="debconf-rabbitmq">message broker
        credentials</link>.</para>
    </step>
  </procedure>
  <procedure>
    <title>To finalize installation</title>
    <step>
      <para>Determine whether your compute node supports hardware acceleration
        for virtual machines:</para>
      <screen><prompt>$</prompt> <userinput>egrep -c '(vmx|svm)' /proc/cpuinfo</userinput></screen>
      <para>If this command returns a value of
        <emphasis>one or greater</emphasis>, your compute node supports
        hardware acceleration, which typically requires no additional
        configuration.</para>
      <para>If this command returns a value of <emphasis>zero</emphasis>,
        your compute node does not support hardware acceleration and you must
        configure <literal>libvirt</literal> to use QEMU instead of KVM.</para>
      <substeps>
        <step>
          <para>Edit the <literal>[libvirt]</literal> section in the
            <filename os="ubuntu;debian">/etc/nova/nova-compute.conf</filename>
            <filename os="rhel;centos;fedora;sles;opensuse"
            >/etc/nova/nova.conf</filename> file as follows:</para>
          <programlisting language="ini">[libvirt]
...
virt_type = qemu</programlisting>
        </step>
      </substeps>
    </step>
    <step os="ubuntu;debian">
      <para>Restart the Compute service:</para>
<screen><prompt>#</prompt> <userinput>service nova-compute restart</userinput></screen>
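      <para>Optionally, verify that the service is running:</para>
      <screen><prompt>#</prompt> <userinput>service nova-compute status</userinput></screen>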
    </step>
    <step os="rhel;centos;fedora;sles;opensuse">
      <para>Start the Compute service including its dependencies and configure
        them to start automatically when the system boots:</para>
      <screen os="rhel;centos;fedora"><prompt>#</prompt> <userinput>systemctl enable libvirtd.service openstack-nova-compute.service</userinput>
<prompt>#</prompt> <userinput>systemctl start libvirtd.service openstack-nova-compute.service</userinput></screen>
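      <para os="rhel;centos;fedora">Optionally, verify that both services are
        running:</para>
      <screen os="rhel;centos;fedora"><prompt>#</prompt> <userinput>systemctl status libvirtd.service openstack-nova-compute.service</userinput></screen>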
      <para os="sles">On SLES:</para>
      <screen os="sles"><prompt>#</prompt> <userinput>service libvirtd start</userinput>
<prompt>#</prompt> <userinput>chkconfig libvirtd on</userinput>
<prompt>#</prompt> <userinput>service openstack-nova-compute start</userinput>
<prompt>#</prompt> <userinput>chkconfig openstack-nova-compute on</userinput></screen>
      <para os="opensuse">On openSUSE:</para>
      <screen os="opensuse"><prompt>#</prompt> <userinput>systemctl enable libvirtd.service openstack-nova-compute.service</userinput>
<prompt>#</prompt> <userinput>systemctl start libvirtd.service openstack-nova-compute.service</userinput></screen>
    </step>
    <step os="ubuntu">
      <para>By default, the Ubuntu packages create an SQLite database.</para>
      <para>Because this configuration uses a SQL database server, you can
        remove the SQLite database file:</para>
      <screen><prompt>#</prompt> <userinput>rm -f /var/lib/nova/nova.sqlite</userinput></screen>
    </step>
  </procedure>
</section>