Merge "[codespell] Fixing Spelling Mistakes"
This commit is contained in:
commit
df9e1ba80e
@ -273,7 +273,7 @@ GET v1/lookup?node_uuid=$NID > lookup-node-response.json
|
||||
# and the node's driver is "fake", to avoid potential races
|
||||
# with internal processes that lock the Node
|
||||
|
||||
# this corrects an intentional ommission in some of the samples
|
||||
# this corrects an intentional omission in some of the samples
|
||||
PATCH v1/nodes/$NID node-update-driver-info-request.json > node-update-driver-info-response.json
|
||||
|
||||
GET v1/nodes/$NID/management/boot_device/supported > node-get-supported-boot-devices-response.json
|
||||
|
@ -52,7 +52,7 @@ parameters must be missing or match the provided node.
|
||||
|
||||
.. versionadded:: 1.79
|
||||
A node with the same name as the allocation ``name`` is moved to the
|
||||
start of the derived candidiate list.
|
||||
start of the derived candidate list.
|
||||
|
||||
Normal response codes: 201
|
||||
|
||||
|
@ -68,7 +68,7 @@ and method.
|
||||
|
||||
This endpoint passes the request directly to the hardware driver. The
|
||||
HTTP BODY must be parseable JSON, which will be converted to parameters passed
|
||||
to that function. Unparseable JSON, missing parameters, or excess parameters
|
||||
to that function. Unparsable JSON, missing parameters, or excess parameters
|
||||
will cause the request to be rejected with an HTTP 400 error.
|
||||
|
||||
Normal response code: 200 202
|
||||
|
@ -61,7 +61,7 @@ and method.
|
||||
|
||||
This endpoint passes the request directly to the Node's hardware driver. The
|
||||
HTTP BODY must be parseable JSON, which will be converted to parameters passed
|
||||
to that function. Unparseable JSON, missing parameters, or excess parameters
|
||||
to that function. Unparsable JSON, missing parameters, or excess parameters
|
||||
will cause the request to be rejected with an HTTP 400 error.
|
||||
|
||||
Normal response code: 200 202
|
||||
|
@ -292,7 +292,7 @@ r_description_contains:
|
||||
Filter the list of returned nodes, and only return those containing
|
||||
substring specified by ``description_contains``.
|
||||
in: query
|
||||
requred: false
|
||||
required: false
|
||||
type: string
|
||||
r_driver:
|
||||
description: |
|
||||
@ -581,7 +581,7 @@ bios_setting_name:
|
||||
type: string
|
||||
bios_setting_read_only:
|
||||
description: |
|
||||
This Bios seting is read only and can't be changed.
|
||||
This Bios setting is read only and can't be changed.
|
||||
May be None.
|
||||
in: body
|
||||
required: true
|
||||
|
@ -1,7 +1,7 @@
|
||||
# NOTE(TheJulia): This is a special bindep file which is independent of the
|
||||
# project bindep file which is for general usage. This bindep file is
|
||||
# intended for execution from Devstack.
|
||||
# The *primary* purpose being, devstack manages sql dependency mangement
|
||||
# The *primary* purpose being, devstack manages sql dependency management
|
||||
# and credential setup, so they can't be included here or it is installed
|
||||
# prematurely.
|
||||
|
||||
|
@ -1293,7 +1293,7 @@ function configure_ironic_dirs {
|
||||
if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
|
||||
if is_ubuntu; then
|
||||
# NOTE(TheJulia): This is done separately here as this allows
|
||||
# the script to have hirtuse/bionic compatability.
|
||||
# the script to have hirsute/bionic compatibility.
|
||||
if [[ -f /usr/lib/ipxe/snponly.efi ]]; then
|
||||
update_loader_copy_paths snponly.efi:/usr/lib/ipxe/snponly.efi
|
||||
elif [[ -f /usr/lib/ipxe/ipxe.efi ]]; then
|
||||
@ -1378,7 +1378,7 @@ function configure_ironic_provision_network {
|
||||
if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
|
||||
# NOTE(TheJulia): Ideally we should let this happen
|
||||
# with our global address, but iPXE seems to have in
|
||||
# consistant behavior in this configuration with devstack.
|
||||
# consistent behavior in this configuration with devstack.
|
||||
# so we will setup a dummy interface and use that.
|
||||
if ! ( ping -c1 -w1 $IRONIC_HOST_IPV6 ); then
|
||||
# But before we create an interface, lets make sure it is
|
||||
@ -1745,7 +1745,7 @@ function configure_ironic_conductor {
|
||||
# - generate outside of this script
|
||||
# - pass path in as $IRONIC_ANSIBLE_SSH_KEY
|
||||
# - distribute private key to subnodes under the same path
|
||||
# Similar to what we do for n-g-s, may be even re-use its key.
|
||||
# Similar to what we do for n-g-s, may be even reuse its key.
|
||||
ssh-keygen -t rsa -N '' -f $IRONIC_ANSIBLE_SSH_KEY
|
||||
chmod 600 $IRONIC_ANSIBLE_SSH_KEY
|
||||
fi
|
||||
@ -3065,7 +3065,7 @@ function upload_baremetal_ironic_efiboot {
|
||||
# request *or* we just do it as the project scoped admin using the admin
|
||||
# project which in devstack's case is the demo project.
|
||||
# In other words, we can't use devstack-system-admin to upload the image
|
||||
# unless we set the project_id in the create reqeust.
|
||||
# unless we set the project_id in the create request.
|
||||
IRONIC_EFIBOOT_ID=$(openstack --os-cloud devstack-admin \
|
||||
image create \
|
||||
$efiboot_name \
|
||||
|
@ -78,7 +78,7 @@ def parse_field_list(content):
|
||||
|
||||
|
||||
def create_bullet_list(input_dict, input_build_env):
|
||||
"""Convert input_dict into a sphinx representaion of a bullet list."""
|
||||
"""Convert input_dict into a sphinx representation of a bullet list."""
|
||||
|
||||
grp_field = GroupedField('grp_field', label='title')
|
||||
bullet_list = nodes.paragraph()
|
||||
@ -138,7 +138,7 @@ def split_list(input_list):
|
||||
"""Split input_list into three sub-lists.
|
||||
|
||||
This function splits the input_list into three, one list containing the
|
||||
inital non-empty items, one list containing items appearing after the
|
||||
initial non-empty items, one list containing items appearing after the
|
||||
string 'Success' in input_list; and the other list containing items
|
||||
appearing after the string 'Failure' in input_list.
|
||||
"""
|
||||
|
@ -126,7 +126,7 @@ glance:
|
||||
openstack image create --file ./initrd.img --container-format ari \
|
||||
--disk-format ari --shared anaconda-ramdisk-<version>
|
||||
openstack image create --file ./squashfs.img --container-format ari \
|
||||
--disk-format ari --shared anaconda-stage-<verison>
|
||||
--disk-format ari --shared anaconda-stage-<version>
|
||||
openstack image create --file ./os-image.tar.gz \
|
||||
--container-format bare --disk-format raw --shared \
|
||||
--property kernel_id=<glance_uuid_vmlinuz> \
|
||||
@ -162,14 +162,14 @@ ironic node:
|
||||
back to Ironic and indicate the state. This token is randomly generated
|
||||
for every deploy, and is required. Specifically this is leveraged in the
|
||||
template's ``pre``, ``onerror``, and ``post`` steps.
|
||||
For more infomation on Agent Token, please see :doc:`/admin/agent-token`.
|
||||
For more information on Agent Token, please see :doc:`/admin/agent-token`.
|
||||
|
||||
Standalone deployments
|
||||
----------------------
|
||||
|
||||
While this deployment interface driver was developed around the use of other
|
||||
OpenStack services, it is not explicitly required. For example HTTP(S) URLs
|
||||
can be supplied by the API user to explictly set the expected baremetal node
|
||||
can be supplied by the API user to explicitly set the expected baremetal node
|
||||
``instance_info`` fields
|
||||
|
||||
.. code-block:: shell
|
||||
@ -299,7 +299,7 @@ Limitations
|
||||
* This deploy interface has only been tested with Red Hat based operating
|
||||
systems that use anaconda. Other systems are not supported.
|
||||
|
||||
* Runtime TLS certifiate injection into ramdisks is not supported. Assets
|
||||
* Runtime TLS certificate injection into ramdisks is not supported. Assets
|
||||
such as ``ramdisk`` or a ``stage2`` ramdisk image need to have trusted
|
||||
Certificate Authority certificates present within the images *or* the
|
||||
Ironic API endpoint utilized should utilize a known trusted Certificate
|
||||
|
@ -445,7 +445,7 @@ power will be turned off via the management interface. Afterwards, the
|
||||
While the deployment step framework also supports the
|
||||
``execute_on_child_nodes`` and ``limit_child_node_execution`` parameters,
|
||||
all of the step frameworks have a fundamental limitation in that child node
|
||||
step execution is indended for syncronous actions which do not rely upon
|
||||
step execution is intended for synchronous actions which do not rely upon
|
||||
the ``ironic-python-agent`` running on any child nodes. This constraint may
|
||||
be changed in the future.
|
||||
|
||||
|
@ -115,7 +115,7 @@ Logging
|
||||
|
||||
Logging is implemented as custom Ansible callback module,
|
||||
that makes use of ``oslo.log`` and ``oslo.config`` libraries
|
||||
and can re-use logging configuration defined in the main ironic configuration
|
||||
and can reuse logging configuration defined in the main ironic configuration
|
||||
file to set logging for Ansible events, or use a separate file for this purpose.
|
||||
|
||||
It works best when ``journald`` support for logging is enabled.
|
||||
|
@ -720,7 +720,7 @@ Set BIOS Config
|
||||
baremetal node passthru call <node> set_bios_config --arg "name=value"
|
||||
|
||||
|
||||
Walkthrough of perfoming a BIOS configuration change:
|
||||
Walkthrough of performing a BIOS configuration change:
|
||||
|
||||
The following section demonstrates how to change BIOS configuration settings,
|
||||
detect that a commit and reboot are required, and act on them accordingly. The
|
||||
|
@ -361,7 +361,7 @@ Node configuration
|
||||
before the Xena release.
|
||||
|
||||
* The following parameters are mandatory in ``driver_info``
|
||||
if ``ilo-inspect`` inspect inteface is used and SNMPv3 inspection
|
||||
if ``ilo-inspect`` inspect interface is used and SNMPv3 inspection
|
||||
(`SNMPv3 Authentication` in `HPE iLO4 User Guide`_) is desired:
|
||||
|
||||
* ``snmp_auth_user`` : The SNMPv3 user.
|
||||
@ -891,7 +891,7 @@ The hardware type ``ilo`` supports hardware inspection.
|
||||
an error. This feature is available in proliantutils release
|
||||
version >= 2.2.0.
|
||||
* The iLO must be updated with SNMPv3 authentication details.
|
||||
Pleae refer to the section `SNMPv3 Authentication` in `HPE iLO4 User Guide`_
|
||||
Please refer to the section `SNMPv3 Authentication` in `HPE iLO4 User Guide`_
|
||||
for setting up authentication details on iLO.
|
||||
The following parameters are mandatory to be given in driver_info
|
||||
for SNMPv3 inspection:
|
||||
@ -1807,7 +1807,7 @@ refer to `HPE Integrated Lights-Out REST API Documentation <https://hewlettpacka
|
||||
Allowed values are ``Enabled``, ``Disabled``.
|
||||
|
||||
- ``WorkloadProfile``:
|
||||
Change the Workload Profile to accomodate your desired workload.
|
||||
Change the Workload Profile to accommodate your desired workload.
|
||||
Allowed values are ``GeneralPowerEfficientCompute``,
|
||||
``GeneralPeakFrequencyCompute``, ``GeneralThroughputCompute``,
|
||||
``Virtualization-PowerEfficient``, ``Virtualization-MaxPerformance``,
|
||||
|
@ -177,7 +177,7 @@ Configuration via ``driver_info``
|
||||
- string representing filesystem path to directory which contains
|
||||
certification file: In this case, iRMC driver uses certification file
|
||||
stored at specified directory. Ironic conductor must be able to access
|
||||
that directory. For iRMC to recongnize certification file, Ironic user
|
||||
that directory. For iRMC to recognize certification file, Ironic user
|
||||
must run ``openssl rehash <path_to_dir>``.
|
||||
|
||||
- string representing filesystem path to certification file: In this case,
|
||||
|
@ -97,7 +97,7 @@ field:
|
||||
.. note::
|
||||
The ``redfish_address``, ``redfish_username``, ``redfish_password``,
|
||||
and ``redfish_verify_ca`` fields, if changed, will trigger a new session
|
||||
to be establsihed and cached with the BMC. The ``redfish_auth_type`` field
|
||||
to be established and cached with the BMC. The ``redfish_auth_type`` field
|
||||
will only be used for the creation of a new cached session, or should
|
||||
one be rejected by the BMC.
|
||||
|
||||
@ -446,7 +446,7 @@ of the remaining updates will pause. When the node is taken out of maintenance
|
||||
mode, processing of the remaining updates will continue.
|
||||
|
||||
When updating the BMC firmware, the BMC may become unavailable for a period of
|
||||
time as it resets. In this case, it may be desireable to have the cleaning step
|
||||
time as it resets. In this case, it may be desirable to have the cleaning step
|
||||
wait after the update has been applied before indicating that the
|
||||
update was successful. This allows the BMC time to fully reset before further
|
||||
operations are carried out against it. To cause the cleaning step to wait after
|
||||
@ -626,7 +626,7 @@ Create Subscription
|
||||
:widths: 25, 15, 15, 90
|
||||
|
||||
"Destination", "body", "string", "The URI of the destination Event Service"
|
||||
"EventTypes (optional)", "body", "array", "List of ypes of events that shall be sent to the destination"
|
||||
"EventTypes (optional)", "body", "array", "List of types of events that shall be sent to the destination"
|
||||
"Context (optional)", "body", "string", "A client-supplied string that is stored with the event destination
|
||||
subscription"
|
||||
"Protocol (optional)", "body", "string", "The protocol type that the event will use for sending
|
||||
|
@ -213,7 +213,7 @@ to consume the output of the burn-in steps more easily, or even in real-time,
|
||||
the nodes can be configured to store the output of the individual steps to
|
||||
files in the ramdisk (from where they can be picked up by a logging pipeline).
|
||||
|
||||
The configuration of the outpout file is done via one of
|
||||
The configuration of the output file is done via one of
|
||||
``agent_burnin_cpu_outputfile``, ``agent_burnin_vm_outputfile``,
|
||||
``agent_burnin_fio_disk_outputfile``, and
|
||||
``agent_burnin_fio_network_outputfile`` parameters which need to be added
|
||||
|
@ -2,7 +2,7 @@ Inspection hooks
|
||||
================
|
||||
|
||||
*Inspection hooks* are a type of the Bare Metal service plug-ins responsible
|
||||
for processing data from in-band inspection. By confuguring these hooks, an
|
||||
for processing data from in-band inspection. By configuring these hooks, an
|
||||
operator can fully customize the inspection processing phase. How the data is
|
||||
collected can be configured with `inspection collectors
|
||||
<https://docs.openstack.org/ironic-python-agent/latest/admin/how_it_works.html#inspection-data>`_.
|
||||
|
@ -34,7 +34,7 @@ option:
|
||||
[inspector]
|
||||
callback_endpoint_override = https://example.com/baremetal-introspection/v1/continue
|
||||
|
||||
For the built-in inspection, the bare metal API endpoint can be overriden
|
||||
For the built-in inspection, the bare metal API endpoint can be overridden
|
||||
instead:
|
||||
|
||||
.. code-block:: ini
|
||||
|
@ -107,7 +107,7 @@ appended to the kernel command line::
|
||||
``nomodeset`` as an option. This option is intended for troubleshooting,
|
||||
and can greatly degrade performance with Matrox/Aspeed BMC Graphics
|
||||
controllers which is very commonly used on physical servers. The
|
||||
performance degredation can greatly reduce IO capacity upon every
|
||||
performance degradation can greatly reduce IO capacity upon every
|
||||
console graphics update being written to the screen.
|
||||
|
||||
Common options
|
||||
|
@ -55,11 +55,11 @@ is being worked in Neutron
|
||||
specific issues, but the OVN DHCP server is an entirely different server
|
||||
than the interfaces were tested upon.
|
||||
|
||||
Maxmium Transmission Units
|
||||
Maximum Transmission Units
|
||||
--------------------------
|
||||
|
||||
OVN's handling of MTUs has been identified by OVN as being incomplete.
|
||||
The reality is that it assumes the MTU is not further constained beyond
|
||||
The reality is that it assumes the MTU is not further constrained beyond
|
||||
the gateway, which sort of works in some cases for virtual machines, but
|
||||
might not be applicable with baremetal because your traffic may pass
|
||||
through lower, or higher MTUs.
|
||||
@ -106,7 +106,7 @@ a higher operational security posture for the deployment. Users of the
|
||||
Rescue
|
||||
------
|
||||
|
||||
Due to the aformentioned NAT issues, we know Rescue operations may not work.
|
||||
Due to the aforementioned NAT issues, we know Rescue operations may not work.
|
||||
|
||||
This is being tracked as `bug 2033083 <https://bugs.launchpad.net/ironic/+bug/2033083>`_.
|
||||
|
||||
|
@ -25,7 +25,7 @@ misbehaving BMCs which may delay or even block the synchronization otherwise.
|
||||
logs). While Ironic automatically retries to get the power status
|
||||
for the affected nodes, the failure rate may be reduced by increasing
|
||||
the power sync cycle, e.g. to 300 seconds, and/or by reducing the number
|
||||
of power sync workers, e.g. to 2. Pleae keep in mind, however, that
|
||||
of power sync workers, e.g. to 2. Please keep in mind, however, that
|
||||
depending on the concrete setup increasing the power sync interval may
|
||||
have an impact on other components relying on up-to-date power states.
|
||||
|
||||
|
@ -5,7 +5,7 @@ Ironic supports booting a user provided ramdisk or an ISO image (starting with
|
||||
the Victoria release) instead of deploying a node.
|
||||
Most commonly this is performed when an instance is booted via PXE, iPXE or
|
||||
Virtual Media, with the only local storage contents being those in memory.
|
||||
It is suported by ``pxe``, ``ipxe``, ``redfish-virtual-media`` and
|
||||
It is supported by ``pxe``, ``ipxe``, ``redfish-virtual-media`` and
|
||||
``ilo-virtual-media`` boot interfaces.
|
||||
|
||||
Configuration
|
||||
|
@ -72,7 +72,7 @@ will hence not be eligible for scheduling of new instances.
|
||||
|
||||
Equally, nodes with ``retired`` set to True cannot move from ``manageable``
|
||||
to ``available``: the ``provide`` verb is blocked. This is to prevent
|
||||
accidental re-use of nodes tagged for removal from the fleet. In order
|
||||
accidental reuse of nodes tagged for removal from the fleet. In order
|
||||
to move these nodes to ``available`` none the less, the ``retired`` field
|
||||
needs to be removed first. This can be done via::
|
||||
|
||||
|
@ -15,7 +15,7 @@ That being said, it would likely help anyone working to implement
|
||||
customization of these policies to consult some reference material
|
||||
in hopes of understanding the context.
|
||||
|
||||
* `Keystone Adminstrator Guide - Service API Protection <https://docs.openstack.org/keystone/latest/admin/service-api-protection.html>`_
|
||||
* `Keystone Administrator Guide - Service API Protection <https://docs.openstack.org/keystone/latest/admin/service-api-protection.html>`_
|
||||
* `Ironic Scoped Role Based Access Control Specification <https://specs.openstack.org/openstack/ironic-specs/specs/17.0/secure-rbac.html>`_
|
||||
|
||||
Historical Context - How we reached our access model
|
||||
@ -64,7 +64,7 @@ indicate an associated ``project_id`` value.
|
||||
|
||||
The Secure RBAC model, since the introduction of the base capability has been
|
||||
extended as a result of an OpenStack community goal to include a ``manager``
|
||||
role in the project scope. By default, this access is equivelent to a Project
|
||||
role in the project scope. By default, this access is equivalent to a Project
|
||||
scoped ``admin`` user, however it may be delineated further as time moves
|
||||
forward.
|
||||
|
||||
|
@ -28,7 +28,7 @@ Similarities to Cleaning and Deployment
|
||||
=======================================
|
||||
|
||||
Similar to Clean and Deploy steps, when invoked an operator can validate
|
||||
the curent running steps by viewing the ``driver_internal_info`` field
|
||||
the current running steps by viewing the ``driver_internal_info`` field
|
||||
looking for a ``service_steps`` field. The *current* step being executed
|
||||
can be viewed using the baremetal node ``service_step`` field, which is a
|
||||
top level field.
|
||||
|
@ -191,7 +191,7 @@ location of the files will depend on the way you've built the ramdisk.
|
||||
|
||||
This will allow you to run commands within the filesystem, e.g. use package
|
||||
manager. If the ramdisk is also systemd-based, and you have login
|
||||
credentials set up, you can even boot a real ramdisk enviroment with
|
||||
credentials set up, you can even boot a real ramdisk environment with
|
||||
|
||||
::
|
||||
|
||||
@ -548,7 +548,7 @@ of such wait states includes:
|
||||
|
||||
* ``clean wait`` for cleaning,
|
||||
* ``inspect wait`` for introspection,
|
||||
* ``rescue wait`` for rescueing, and
|
||||
* ``rescue wait`` for rescuing, and
|
||||
* ``wait call-back`` for deploying.
|
||||
|
||||
Communication issues between the conductor and the node
|
||||
@ -641,7 +641,7 @@ stable state, and the node should not be provisioned. If the
|
||||
``baremetal port delete`` command fails, this may indicate that
|
||||
a known VIF is still attached. Generally if they are transitory from cleaning,
|
||||
provisioning, rescuing, or even inspection, getting the node to the
|
||||
``available`` state wil unblock your delete operation, that is unless there is
|
||||
``available`` state will unblock your delete operation, that is unless there is
|
||||
a tenant VIF attachment. In that case, the vif will need to be removed from
|
||||
with-in the Bare Metal service using the
|
||||
``baremetal node vif detach`` command.
|
||||
@ -652,7 +652,7 @@ the port's ``internal_info`` field.
|
||||
.. warning::
|
||||
The ``maintenance`` flag can be used to force the node's port to be
|
||||
deleted, however this will disable any check that would normally block
|
||||
the user from issuing a delete and accidently orphaning the VIF attachment
|
||||
the user from issuing a delete and accidentally orphaning the VIF attachment
|
||||
record.
|
||||
|
||||
How do I resolve this?
|
||||
@ -682,8 +682,8 @@ Using that, you can delete the port. Example:
|
||||
My test VM image does not deploy -- mount point does not exist
|
||||
==============================================================
|
||||
|
||||
What is likely occuring
|
||||
-----------------------
|
||||
What is likely occurring
|
||||
------------------------
|
||||
|
||||
The image attempting to be deployed likely is a partition image where
|
||||
the file system that the user wishes to boot from lacks the required
|
||||
@ -845,7 +845,7 @@ How did I get here?
|
||||
One of the major consumers of memory in a host running an ironic-conductor is
|
||||
transformation of disk images using the ``qemu-img`` tool. This tool, because
|
||||
the disk images it works with are both compressed and out of linear block
|
||||
order, requires a considerable amount of memory to efficently re-assemble
|
||||
order, requires a considerable amount of memory to efficiently re-assemble
|
||||
and write-out a disk to a device, or to simply convert the format such as
|
||||
to a ``raw`` image.
|
||||
|
||||
@ -986,7 +986,7 @@ baremetal operators.
|
||||
These settings can be modified by using the
|
||||
``[conductor]max_concurrent_deploy`` and ``[conductor]max_concurrent_clean``
|
||||
settings from the ironic.conf file supporting the ``ironic-conductor``
|
||||
service. Neither setting can be explicity disabled, however there is also no
|
||||
service. Neither setting can be explicitly disabled, however there is also no
|
||||
upper limit to the setting.
|
||||
|
||||
.. note::
|
||||
@ -1112,7 +1112,7 @@ basic troubleshooting steps:
|
||||
* If the version of ironic supports the feature, consult the node history
|
||||
log, ``baremetal node history list`` and
|
||||
``baremetal node history get <uuid>``.
|
||||
* Consult the acutal console screen of the physical machine. *If* the ramdisk
|
||||
* Consult the actual console screen of the physical machine. *If* the ramdisk
|
||||
booted, you will generally want to investigate the controller logs and see
|
||||
if an uploaded agent log is being stored on the conductor responsible for
|
||||
the baremetal node. Consult `Retrieving logs from the deploy ramdisk`_.
|
||||
|
@ -167,7 +167,7 @@ user.
|
||||
|
||||
Often, depending on load, query patterns, periodic tasks, and so on and so
|
||||
forth, additional indexes may be needed to help provide hints to the database
|
||||
so it can most efficently attempt to reduce the number of rows which need to
|
||||
so it can most efficiently attempt to reduce the number of rows which need to
|
||||
be examined in order to return a result set.
|
||||
|
||||
Adding indexes
|
||||
|
@ -81,7 +81,7 @@ Feature Submission Process
|
||||
#. As soon as a member of the team acknowledges the bug,
|
||||
we will move it to the 'Review' state. As time goes on, Discussion
|
||||
about the RFE, and whether to approve it will occur. If the RFE has not
|
||||
been triaged and you'd like it to recieve immediate attention, add it to
|
||||
been triaged and you'd like it to receive immediate attention, add it to
|
||||
the Open Discussion section of our
|
||||
`weekly meeting agenda <https://wiki.openstack.org/wiki/Meetings/Ironic>`,
|
||||
and, timezone permitting, attend the meeting to advocate for your RFE.
|
||||
|
@ -13,7 +13,7 @@ standard pattern of entirely python based unit testing. In part this was done
|
||||
for purposes of speed and to keep the declaration of the test context.
|
||||
|
||||
This also lent itself to be very useful due to the nature of A/B testing
|
||||
which is requried to properly migrate the Ironic project from a project
|
||||
which is required to properly migrate the Ironic project from a project
|
||||
scoped universe where an ``admin project`` is utilized as the authenticating
|
||||
factor coupled with two custom roles, ``baremetal_admin``, and
|
||||
``baremetal_observer``.
|
||||
@ -41,7 +41,7 @@ back would have been a heavier lift. As such, the tests largely look for
|
||||
one of the following error codes.
|
||||
|
||||
* 200 - Got the item from the API - This is a database driven interaction.
|
||||
* 201 - Created - This is databaes driven interaction. These are rare.
|
||||
* 201 - Created - This is database driven interaction. These are rare.
|
||||
* 204 - Accepted - This is a database driven interaction. These are rare.
|
||||
* 403 - Forbidden - This tells us the policy worked as expected where
|
||||
access was denied.
|
||||
@ -78,7 +78,7 @@ Some general rules apply
|
||||
Remember: owner and lessee admins are closer to System scoped Admin Members.
|
||||
* Members can do some things, but not everything
|
||||
* Readers can always read, but as we get into sensitive data later on
|
||||
such as fields containing infrastucture internal addresses, these values
|
||||
such as fields containing infrastructure internal addresses, these values
|
||||
will become hidden and additional tests will examine this.
|
||||
* Third party, or external/other Admins will find nothing but sadness
|
||||
in empty lists, 403, 404, or even 500 errors.
|
||||
@ -91,7 +91,7 @@ tests Role Based Access Control related capabilities will come in a
|
||||
series of phases, styles vary a little.
|
||||
|
||||
The first phase is ``"legacy"``. In essence these are partially
|
||||
programatically generated and then human reviewed and values populated
|
||||
programmatically generated and then human reviewed and values populated
|
||||
with expected values.
|
||||
|
||||
The second phase is remarkably similar to ``legacy``. It is the safety net
|
||||
@ -108,7 +108,7 @@ to the ``legacy`` tests. The major difference being some header values,
|
||||
and a user with a ``member`` role in the ``system`` scope now has some
|
||||
rights.
|
||||
|
||||
The forth phase, is implementaiton of ``owner`` and ``lessee`` aware
|
||||
The fourth phase is implementation of ``owner`` and ``lessee`` aware
|
||||
project scoping. The testing approach is similar, however it is much more of
|
||||
a "shotgun" approach. We test what we know should work, and what we know should
|
||||
not work, but we do not have redundant testing for each role as ``admin``
|
||||
|
@ -6,7 +6,7 @@ Contributor Vision
|
||||
|
||||
Background
|
||||
==========
|
||||
During the Rocky Project Teams Gathering (Feburary/March 2018),
|
||||
During the Rocky Project Teams Gathering (February/March 2018),
|
||||
The contributors in the room at that time took a few minutes to write out
|
||||
each contributor's vision of where they see ironic in five years time.
|
||||
|
||||
|
@ -26,7 +26,7 @@ IPMI is a relatively old protocol and may require additional set up on the
|
||||
hardware side that the Bare Metal service cannot do automatically:
|
||||
|
||||
#. Make sure IPMI is enabled and the account you use have the permissions
|
||||
to change power and boot devices. By default the adminstrator rights are
|
||||
to change power and boot devices. By default the administrator rights are
|
||||
expected, you can change it: see :ref:`ipmi-priv-level`.
|
||||
|
||||
#. Make sure the cipher suites are configured for maximum security. Suite 17 is
|
||||
|
@ -13,7 +13,7 @@ Configure ironic PXE driver for provisioning using IPv6 addressing
|
||||
The PXE drivers operate in such a way that they are able to utilize
|
||||
both IPv4 and IPv6 addresses based upon the deployment's operating state and
|
||||
configuration. Internally, the drivers attempt to prepare configuration options for both formats, which allows ports which are IPv6 only to automatically
|
||||
receieve boot parameters. As a result of this, it is critical that the
|
||||
receive boot parameters. As a result of this, it is critical that the
|
||||
``[DEFAULT]my_ipv6`` configuration parameter is set to the conductor's
|
||||
IPv6 address. This option is unique per conductor, and due to the nature
|
||||
of automatic address assignment, it cannot be "guessed" by the software.
|
||||
|
@ -12,12 +12,12 @@ as the underlying resources to provision the node must be available for
|
||||
successful operations.
|
||||
|
||||
|
||||
.. Warning:: This docuemntation is geared for use of OVS with Neutron along
|
||||
.. Warning:: This documentation is geared for use of OVS with Neutron along
|
||||
with the ``neutron-dhcp-agent``. It *is* possible to use OVN
|
||||
with ``neutron-dhcp-agent``, and depending on version of OVN
|
||||
and Neutron, OVN's own DHCP service for IPv4 clients, but that
|
||||
is considered an advanced topic, and we encourage operators
|
||||
interested in use of OVN to fully undestand it's capabilities
|
||||
interested in use of OVN to fully understand its capabilities
|
||||
and state before attempting to utilize such a configuration.
|
||||
Please see :doc:`/admin/ovn-networking` for more details.
|
||||
|
||||
|
@ -300,7 +300,7 @@ on the Bare Metal service node(s) where ``ironic-conductor`` is running.
|
||||
Setting the iPXE parameters noted in the code block above to no value,
|
||||
in other words setting a line to something like ``ipxe_bootfile_name=``
|
||||
will result in ironic falling back to the default values of the non-iPXE
|
||||
PXE settings. This is for backwards compatability.
|
||||
PXE settings. This is for backwards compatibility.
|
||||
|
||||
#. Ensure iPXE is the default PXE, if applicable.
|
||||
|
||||
@ -520,7 +520,7 @@ You may also populate other IPA options (e.g. ``ipa-debug=1`` for detailed
|
||||
logging, ``ipa-inspection-collectors`` to customize the inspection process,
|
||||
or ``ipa-api-url`` to enable :doc:`/admin/fast-track`).
|
||||
|
||||
Second, you need to configure DHCP for unknows hosts since the OpenStack
|
||||
Second, you need to configure DHCP for unknown hosts since the OpenStack
|
||||
Networking service won't be able to handle them. For instance, you can install
|
||||
**dnsmasq** and use the following ``/etc/dnsmasq.conf``:
|
||||
|
||||
|
@ -59,7 +59,7 @@ Bare Metal service comes with an example file for configuring the
|
||||
/var/log/httpd).
|
||||
|
||||
#. Stop and disable the ironic-api service. If ironic-api service is
|
||||
started, the port will be occupied. Apach will fail to start:
|
||||
started, the port will be occupied. Apache will fail to start:
|
||||
|
||||
Fedora/RHEL8/CentOS8/SUSE::
|
||||
|
||||
|
@ -66,7 +66,7 @@ You should make the following changes to ``/etc/ironic/ironic.conf``:
|
||||
rpc_transport = json-rpc
|
||||
|
||||
JSON RPC also has its own authentication strategy. If it is not specified then
|
||||
the stategy defaults to ``[DEFAULT]`` ``auth_strategy``. The following will
|
||||
the strategy defaults to ``[DEFAULT]`` ``auth_strategy``. The following will
|
||||
set JSON RPC to ``noauth``:
|
||||
|
||||
.. code-block:: ini
|
||||
|
@ -76,7 +76,7 @@ You need to specify image information in the node's ``instance_info``
|
||||
* In case the image source requires HTTP(s) Basic Authentication ``RFC 7616``
|
||||
then the relevant authentication strategy has to be configured as
|
||||
``http_basic`` and supplied with credentials in the ironic global config
|
||||
file. Further infromation about the authentication strategy selection
|
||||
file. Further information about the authentication strategy selection
|
||||
can be found in :doc:`/admin/user-image-basic-auth`.
|
||||
|
||||
* ``root_gb`` - size of the root partition, required for partition images.
|
||||
|
@ -271,22 +271,22 @@ class AllocationsController(pecan.rest.RestController):
|
||||
:fields: fields
|
||||
:owner: r_owner
|
||||
"""
|
||||
requestor = api_utils.check_list_policy('allocation', owner)
|
||||
requester = api_utils.check_list_policy('allocation', owner)
|
||||
|
||||
self._check_allowed_allocation_fields(fields)
|
||||
if owner is not None and not api_utils.allow_allocation_owner():
|
||||
# Requestor has asked for an owner field/column match, but
|
||||
# Requester has asked for an owner field/column match, but
|
||||
# their client version does not support it.
|
||||
raise exception.NotAcceptable()
|
||||
if (owner is not None
|
||||
and requestor is not None
|
||||
and owner != requestor):
|
||||
# The requestor is asking about other owner's records.
|
||||
and requester is not None
|
||||
and owner != requester):
|
||||
# The requester is asking about other owner's records.
|
||||
# Naughty!
|
||||
raise exception.NotAuthorized()
|
||||
|
||||
if requestor is not None:
|
||||
owner = requestor
|
||||
if requester is not None:
|
||||
owner = requester
|
||||
|
||||
return self._get_allocations_collection(node, resource_class, state,
|
||||
owner, marker, limit,
|
||||
@ -383,7 +383,7 @@ class AllocationsController(pecan.rest.RestController):
|
||||
if req_alloc_owner != project_id:
|
||||
msg = _("Cannot create allocation with an owner "
|
||||
"Project ID value %(req_owner)s not matching "
|
||||
"the requestor Project ID %(project)s. "
|
||||
"the requester Project ID %(project)s. "
|
||||
"Policy baremetal:allocation:create_restricted"
|
||||
" is required for this capability."
|
||||
) % {'req_owner': req_alloc_owner,
|
||||
@ -427,7 +427,7 @@ class AllocationsController(pecan.rest.RestController):
|
||||
if not api_utils.check_policy_true(
|
||||
'baremetal:allocation:create_restricted'):
|
||||
owner = cdict.get('project_id')
|
||||
# Filter the candidate search by the requestor project ID
|
||||
# Filter the candidate search by the requester project ID
|
||||
# if any. The result is processes authenticating with system
|
||||
# scope will not be impacted, where as project scoped requests
|
||||
# will need additional authorization.
|
||||
|
@ -79,7 +79,7 @@ def get_next(collection, limit, url, key_field='uuid', **kwargs):
|
||||
|
||||
fields = kwargs.pop('fields', None)
|
||||
# NOTE(saga): If fields argument is present in kwargs and not None. It
|
||||
# is a list so convert it into a comma seperated string.
|
||||
# is a list so convert it into a comma separated string.
|
||||
if fields:
|
||||
kwargs['fields'] = ','.join(fields)
|
||||
q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
|
||||
|
@ -313,7 +313,7 @@ class DeployTemplatesController(rest.RestController):
|
||||
# This is due to the fact that the patch operation is always applied in
|
||||
# the API. Ways to avoid this include passing the patch to the
|
||||
# conductor to apply while holding a lock, or a collision detection
|
||||
# & retry mechansim using e.g. the updated_at field.
|
||||
# & retry mechanism using e.g. the updated_at field.
|
||||
notify.emit_start_notification(context, rpc_template, 'update')
|
||||
with notify.handle_error_notification(context, rpc_template, 'update'):
|
||||
rpc_template.save()
|
||||
|
@ -923,7 +923,7 @@ class NodeStatesController(rest.RestController):
|
||||
'modes': ', '.join(ALLOWED_TARGET_BOOT_MODES)})
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
# NOTE(cenne): This currenly includes the ADOPTING state
|
||||
# NOTE(cenne): This currently includes the ADOPTING state
|
||||
if rpc_node.provision_state in ir_states.UNSTABLE_STATES:
|
||||
msg = _("Node is in %(state)s state. Since node is transitioning, "
|
||||
"the boot mode will not be set as this may interfere "
|
||||
@ -971,7 +971,7 @@ class NodeStatesController(rest.RestController):
|
||||
{'state': target})
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
# NOTE(cenne): This currenly includes the ADOPTING state
|
||||
# NOTE(cenne): This currently includes the ADOPTING state
|
||||
if rpc_node.provision_state in ir_states.UNSTABLE_STATES:
|
||||
msg = _("Node is in %(state)s state. Since node is transitioning, "
|
||||
"the boot mode will not be set as this may interfere "
|
||||
@ -1648,7 +1648,7 @@ def node_sanitize(node, fields, cdict=None,
|
||||
:type fields: list of str
|
||||
:param cdict: Context dictionary for policy values evaluation.
|
||||
If not provided, it will be executed by the method,
|
||||
however for enumerating node lists, it is more efficent
|
||||
however for enumerating node lists, it is more efficient
|
||||
to provide.
|
||||
:param show_driver_secrets: A boolean value to allow external single
|
||||
evaluation of policy instead of once per
|
||||
@ -1781,7 +1781,7 @@ def _node_sanitize_extended(node, node_keys, target_dict, cdict):
|
||||
and not policy.check("baremetal:node:get:last_error",
|
||||
target_dict, cdict)):
|
||||
# Guard the last error from being visible as it can contain
|
||||
# hostnames revealing infrastucture internal details.
|
||||
# hostnames revealing infrastructure internal details.
|
||||
node['last_error'] = ('** Value Redacted - Requires '
|
||||
'baremetal:node:get:last_error '
|
||||
'permission. **')
|
||||
@ -2822,7 +2822,7 @@ class NodesController(rest.RestController):
|
||||
|
||||
if requested_owner and requested_owner != project_id:
|
||||
# Translation: If project scoped, and an owner has been
|
||||
# requested, and that owner does not match the requestor's
|
||||
# requested, and that owner does not match the requester's
|
||||
# project ID value.
|
||||
msg = _("Cannot create a node as a project scoped admin "
|
||||
"with an owner other than your own project.")
|
||||
|
@ -366,7 +366,7 @@ def patched_validate_with_schema(patched_dict, schema, validator=None):
|
||||
updates applied
|
||||
:param schema: Any dict key not in the schema will be deleted from the
|
||||
dict. If no validator is specified then the resulting ``patched_dict``
|
||||
will be validated agains the schema
|
||||
will be validated against the schema
|
||||
:param validator: Optional validator to use if there is extra validation
|
||||
required beyond the schema
|
||||
:raises: exception.Invalid if validation fails
|
||||
|
@ -94,7 +94,7 @@ BASE_VERSION = 1
|
||||
# v1.54: Add events support.
|
||||
# v1.55: Add deploy templates API.
|
||||
# v1.56: Add support for building configdrives.
|
||||
# v1.57: Add support for updating an exisiting allocation.
|
||||
# v1.57: Add support for updating an existing allocation.
|
||||
# v1.58: Add support for backfilling allocations.
|
||||
# v1.59: Add support vendor data in configdrives.
|
||||
# v1.60: Add owner to the allocation object.
|
||||
|
@ -75,7 +75,7 @@ class FunctionDefinition(object):
|
||||
#: The function arguments (list of :class:`FunctionArgument`)
|
||||
self.arguments = []
|
||||
|
||||
#: If the body carry the datas of a single argument, its type
|
||||
#: If the body carry the data of a single argument, its type
|
||||
self.body_type = None
|
||||
|
||||
#: Status code
|
||||
@ -86,7 +86,7 @@ class FunctionDefinition(object):
|
||||
#: exceptions
|
||||
self.ignore_extra_args = False
|
||||
|
||||
#: Dictionnary of protocol-specific options.
|
||||
#: Dictionary of protocol-specific options.
|
||||
self.extra_options = None
|
||||
|
||||
@staticmethod
|
||||
@ -145,7 +145,7 @@ class signature(object):
|
||||
:param body: If the function takes a final argument that is supposed to be
|
||||
the request body by itself, its type.
|
||||
:param status_code: HTTP return status code of the function.
|
||||
:param ignore_extra_args: Allow extra/unknow arguments (default to False)
|
||||
:param ignore_extra_args: Allow extra/unknown arguments (default to False)
|
||||
|
||||
Most of the time this decorator is not supposed to be used directly,
|
||||
unless you are not using WSME on top of another framework.
|
||||
|
@ -112,7 +112,7 @@ def body(body_arg):
|
||||
|
||||
|
||||
def format_exception(excinfo, debug=False):
|
||||
"""Extract informations that can be sent to the client."""
|
||||
"""Extract information that can be sent to the client."""
|
||||
error = excinfo[1]
|
||||
code = getattr(error, 'code', None)
|
||||
if code and code in http_client.responses and (400 <= code < 500):
|
||||
|
@ -35,7 +35,7 @@ def _error_about_sqlite_usage():
|
||||
# TODO(TheJulia): Make this a hard error in C*
|
||||
LOG.error('We have detected the API is being launched with a SQLite '
|
||||
'database backend. This is unsupported, and will be a hard '
|
||||
'error in the future. This is becaues multi-process use of '
|
||||
'error in the future. This is because multi-process use of '
|
||||
'a single SQLite database is problematic in terms of '
|
||||
'locking. A single process ironic model exists for use with '
|
||||
'SQLite.')
|
||||
|
@ -327,7 +327,7 @@ def validate(*args, **kwargs):
|
||||
"""Decorator which validates and transforms function arguments
|
||||
|
||||
"""
|
||||
assert not args, 'Validators must be specifed by argument name'
|
||||
assert not args, 'Validators must be specified by argument name'
|
||||
assert kwargs, 'No validators specified'
|
||||
validators = kwargs
|
||||
|
||||
|
@ -87,7 +87,7 @@ def get_client(context, auth_from_config=False):
|
||||
auth=user_auth or service_auth)
|
||||
|
||||
# Re-determine the endpoint so we can work with versions prior to
|
||||
# Yoga, becuase the endpoint, based upon configuration, may require
|
||||
# Yoga, because the endpoint, based upon configuration, may require
|
||||
# project_id specific URLs.
|
||||
if user_auth:
|
||||
endpoint = keystone.get_endpoint('cinder', session=sess,
|
||||
|
@ -552,7 +552,7 @@ def create_boot_iso(context, output_filename, kernel_href,
|
||||
|
||||
elif CONF.esp_image:
|
||||
esp_image_path = CONF.esp_image
|
||||
# TODO(TheJulia): we should opportunisticly try to make bios
|
||||
# TODO(TheJulia): we should opportunistically try to make bios
|
||||
# bootable and UEFI. In other words, collapse a lot of this
|
||||
# path since they are not mutually exclusive.
|
||||
# UEFI boot mode, but Network iPXE -> ISO means bios bootable
|
||||
|
@ -139,7 +139,7 @@ def get_service_auth(context, endpoint, service_auth,
|
||||
:param context: The RequestContext instance from which the user
|
||||
auth_token is extracted.
|
||||
:param endpoint: The requested endpoint to be utilized.
|
||||
:param service_auth: The service authenticaiton credentals to be
|
||||
:param service_auth: The service authentication credentals to be
|
||||
used.
|
||||
:param only_service_auth: Boolean, default False. When set to True,
|
||||
the resulting Service token pair is generated
|
||||
|
@ -748,7 +748,7 @@ node_policies = [
|
||||
deprecated_rule=deprecated_node_clear_maintenance
|
||||
),
|
||||
|
||||
# NOTE(TheJulia): This should liekly be deprecated and be replaced with
|
||||
# NOTE(TheJulia): This should likely be deprecated and be replaced with
|
||||
# a cached object.
|
||||
policy.DocumentedRuleDefault(
|
||||
name='baremetal:node:get_boot_device',
|
||||
@ -977,7 +977,7 @@ node_policies = [
|
||||
name='baremetal:node:history:get',
|
||||
check_str=SYSTEM_OR_OWNER_READER,
|
||||
scope_types=['system', 'project'],
|
||||
description='Filter to allow operators to retreive history records '
|
||||
description='Filter to allow operators to retrieve history records '
|
||||
'for a node.',
|
||||
operations=[
|
||||
{'path': '/nodes/{node_ident}/history', 'method': 'GET'},
|
||||
@ -1929,7 +1929,7 @@ def init_enforcer(policy_file=None, rules=None,
|
||||
rules=rules,
|
||||
default_rule=default_rule,
|
||||
use_conf=use_conf)
|
||||
# NOTE(melwitt): Explictly disable the warnings for policies
|
||||
# NOTE(melwitt): Explicitly disable the warnings for policies
|
||||
# changing their default check_str. During policy-defaults-refresh
|
||||
# work, all the policy defaults have been changed and warning for
|
||||
# each policy started filling the logs limit for various tool.
|
||||
|
@ -780,7 +780,7 @@ def get_instance_image_info(task, ipxe_enabled=False):
|
||||
# told anaconda how to execute.
|
||||
if i_info.get('ks_template'):
|
||||
# If the value is set, we always overwrite it, in the event
|
||||
# a rebuild is occuring or something along those lines.
|
||||
# a rebuild is occurring or something along those lines.
|
||||
node.set_driver_internal_info('ks_template',
|
||||
i_info['ks_template'])
|
||||
else:
|
||||
|
@ -114,7 +114,7 @@ def is_valid_logical_name(hostname):
|
||||
"""Determine if a logical name is valid.
|
||||
|
||||
The logical name may only consist of RFC3986 unreserved
|
||||
characters, to wit:
|
||||
characters:
|
||||
|
||||
ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
"""
|
||||
|
@ -212,7 +212,7 @@ def _allocate_node(context, allocation, nodes):
|
||||
try:
|
||||
# NOTE(dtantsur): retries are done for all nodes above, so disable
|
||||
# per-node retry. Also disable loading the driver, since the
|
||||
# current conductor may not have the requried hardware type or
|
||||
# current conductor may not have the required hardware type or
|
||||
# interfaces (it's picked at random).
|
||||
with task_manager.acquire(context, node.uuid, shared=False,
|
||||
retry=False, load_driver=False,
|
||||
|
@ -127,7 +127,7 @@ class BaseConductorManager(object):
|
||||
"""Initialize the conductor host.
|
||||
|
||||
:param admin_context: the admin context to pass to periodic tasks.
|
||||
:param start_consoles: If consoles should be started in intialization.
|
||||
:param start_consoles: If consoles should be started in initialization.
|
||||
:param start_allocations: If allocations should be started in
|
||||
initialization.
|
||||
:raises: RuntimeError when conductor is already running.
|
||||
|
@ -275,7 +275,7 @@ def execute_step_on_child_nodes(task, step):
|
||||
:param task: The TaskManager object for the parent node.
|
||||
:param step: The requested step to be executed.
|
||||
:returns: None on Success, the resulting error message if a
|
||||
failure has occured.
|
||||
failure has occurred.
|
||||
"""
|
||||
# NOTE(TheJulia): We could just use nodeinfo list calls against
|
||||
# dbapi.
|
||||
|
@ -520,7 +520,7 @@ def execute_step_on_child_nodes(task, step):
|
||||
:param task: The TaskManager object for the parent node.
|
||||
:param step: The requested step to be executed.
|
||||
:returns: None on Success, the resulting error message if a
|
||||
failure has occured.
|
||||
failure has occurred.
|
||||
"""
|
||||
# NOTE(TheJulia): We could just use nodeinfo list calls against
|
||||
# dbapi.
|
||||
|
@ -2682,7 +2682,7 @@ class ConductorManager(base_manager.BaseConductorManager):
|
||||
except AttributeError:
|
||||
# TODO(TheJulia): Remove this at some point, but right now
|
||||
# don't inherently break on version mismatches when people
|
||||
# disregard requriements.
|
||||
# disregard requirements.
|
||||
LOG.warning(
|
||||
'get_sensors_data has been configured to collect '
|
||||
'conductor metrics, however the installed ironic-lib '
|
||||
@ -2690,7 +2690,7 @@ class ConductorManager(base_manager.BaseConductorManager):
|
||||
'ironic-lib to a minimum of version 5.4.0.')
|
||||
except Exception as e:
|
||||
LOG.exception(
|
||||
"An unknown error occured while attempting to collect "
|
||||
"An unknown error occurred while attempting to collect "
|
||||
"sensor data from within the conductor. Error: %(error)s",
|
||||
{'error': e})
|
||||
else:
|
||||
@ -3595,7 +3595,7 @@ class ConductorManager(base_manager.BaseConductorManager):
|
||||
{'node': node_id})
|
||||
# Allow lookup to work by returning a value, it is just an
|
||||
# unusable value that can't be verified against.
|
||||
# This is important if the agent lookup has occured with
|
||||
# This is important if the agent lookup has occurred with
|
||||
# pre-generation of tokens with virtual media usage.
|
||||
node.set_driver_internal_info('agent_secret_token', "******")
|
||||
return node
|
||||
|
@ -238,7 +238,7 @@ def execute_step_on_child_nodes(task, step):
|
||||
:param task: The TaskManager object for the parent node.
|
||||
:param step: The requested step to be executed.
|
||||
:returns: None on Success, the resulting error message if a
|
||||
failure has occured.
|
||||
failure has occurred.
|
||||
"""
|
||||
# NOTE(TheJulia): We could just use nodeinfo list calls against
|
||||
# dbapi.
|
||||
|
@ -1057,7 +1057,7 @@ def power_state_for_network_configuration(task):
|
||||
def build_configdrive(node, configdrive):
|
||||
"""Build a configdrive from provided meta_data, network_data and user_data.
|
||||
|
||||
If uuid or name are not provided in the meta_data, they're defauled to the
|
||||
If uuid or name are not provided in the meta_data, they're defaulted to the
|
||||
node's uuid and name accordingly.
|
||||
|
||||
:param node: an Ironic node object.
|
||||
@ -1130,7 +1130,7 @@ def value_within_timeout(value, timeout):
|
||||
:param timeout: timeout in seconds.
|
||||
"""
|
||||
# use native datetime objects for conversion and compare
|
||||
# slightly odd because py2 compatability :(
|
||||
# slightly odd because py2 compatibility :(
|
||||
last = datetime.datetime.strptime(value or '1970-01-01T00:00:00.000000',
|
||||
"%Y-%m-%dT%H:%M:%S.%f")
|
||||
# If we found nothing, we assume that the time is essentially epoch.
|
||||
@ -1142,7 +1142,7 @@ def value_within_timeout(value, timeout):
|
||||
def agent_is_alive(node, timeout=None):
|
||||
"""Check that the agent is likely alive.
|
||||
|
||||
The method then checks for the last agent heartbeat, and if it occured
|
||||
The method then checks for the last agent heartbeat, and if it occurred
|
||||
within the timeout set by [deploy]fast_track_timeout, then agent is
|
||||
presumed alive.
|
||||
|
||||
@ -1167,7 +1167,7 @@ def is_fast_track(task):
|
||||
have a ramdisk running through another means like discovery.
|
||||
If not valid, False is returned.
|
||||
|
||||
The method then checks for the last agent heartbeat, and if it occured
|
||||
The method then checks for the last agent heartbeat, and if it occurred
|
||||
within the timeout set by [deploy]fast_track_timeout and the power
|
||||
state for the machine is POWER_ON, then fast track is permitted.
|
||||
|
||||
@ -1339,7 +1339,7 @@ def is_agent_token_pregenerated(node):
|
||||
|
||||
This method helps us identify WHEN we did so as we don't need to remove
|
||||
records of the token prior to rebooting the token. This is important as
|
||||
tokens provided through out of band means presist in the virtual media
|
||||
tokens provided through out of band means persist in the virtual media
|
||||
image, are loaded as part of the agent ramdisk, and do not require
|
||||
regeneration of the token upon the initial lookup, ultimately making
|
||||
the overall usage of virtual media and pregenerated tokens far more
|
||||
@ -1653,7 +1653,7 @@ def node_history_record(node, conductor=None, event=None,
|
||||
based upon the activity. The purpose is to help guide
|
||||
an API consumer/operator to have a better contextual
|
||||
understanding of what was going on *when* the "event"
|
||||
occured.
|
||||
occurred.
|
||||
:param user: The user_id value which triggered the request,
|
||||
if available.
|
||||
:param error: Boolean value, default false, to signify if the event
|
||||
@ -1662,11 +1662,11 @@ def node_history_record(node, conductor=None, event=None,
|
||||
:returns: None. No value is returned by this method.
|
||||
"""
|
||||
if not event:
|
||||
# No error has occured, apparently.
|
||||
# No error has occurred, apparently.
|
||||
return
|
||||
if error:
|
||||
# When the task exits out or is saved, the event
|
||||
# or error is saved, but that is outside of ceating an
|
||||
# or error is saved, but that is outside of creating an
|
||||
# entry in the history table.
|
||||
node.last_error = event
|
||||
if not conductor:
|
||||
@ -1706,7 +1706,7 @@ def update_image_type(context, node):
|
||||
# idea since it is also user-settable, but laregely is just geared
|
||||
# to take what is in glance. Line below should we wish to uncomment.
|
||||
# node.set_instance_info('image_type', images.IMAGE_TYPE_DIRECTORY)
|
||||
# An alternative is to explictly allow it to be configured by the
|
||||
# An alternative is to explicitly allow it to be configured by the
|
||||
# caller/requester.
|
||||
return True
|
||||
|
||||
@ -1747,7 +1747,7 @@ def get_token_project_from_request(ctx):
|
||||
This method evaluates the ``auth_token_info`` field, which is used to
|
||||
pass information returned from keystone as a token's
|
||||
verification. This information is based upon the actual, original
|
||||
requestor context provided ``auth_token``.
|
||||
requester context provided ``auth_token``.
|
||||
|
||||
When a service, such as Nova proxies a request, the request provided
|
||||
auth token value is intended to be from the original user.
|
||||
@ -1761,7 +1761,7 @@ def get_token_project_from_request(ctx):
|
||||
if project:
|
||||
return project.get('id')
|
||||
except AttributeError:
|
||||
LOG.warning('Attempted to identify requestor project ID value, '
|
||||
LOG.warning('Attempted to identify requester project ID value, '
|
||||
'however we were unable to do so. Possible older API?')
|
||||
|
||||
|
||||
@ -1776,7 +1776,7 @@ def servicing_error_handler(task, logmsg, errmsg=None, traceback=False,
|
||||
used.
|
||||
:param traceback: Whether to log a traceback. Defaults to False.
|
||||
:param tear_down_service: Whether to clean up the PXE and DHCP files after
|
||||
servie. Default to True.
|
||||
service. Default to True.
|
||||
:param set_fail_state: Whether to set node to failed state. Default to
|
||||
True.
|
||||
:param set_maintenance: Whether to set maintenance mode. If None,
|
||||
|
@ -37,7 +37,7 @@ opts = [
|
||||
'facilitate easy testing and use of the ``anaconda`` '
|
||||
'deployment interface. When this option is set, '
|
||||
'heartbeat operations, depending on the contents of '
|
||||
'the utilized kickstart template, may not enfore TLS '
|
||||
'the utilized kickstart template, may not enforce TLS '
|
||||
'certificate verification.')),
|
||||
]
|
||||
|
||||
|
@ -79,7 +79,7 @@ opts = [
|
||||
cfg.BoolOpt('enable_for_nodes',
|
||||
default=True,
|
||||
help=_('If to transmit any sensor data for any nodes under '
|
||||
'this conductor\'s management. This option superceeds '
|
||||
'this conductor\'s management. This option supersedes '
|
||||
'the ``send_sensor_data_for_undeployed_nodes`` '
|
||||
'setting.')),
|
||||
]
|
||||
|
@ -1419,7 +1419,7 @@ class Connection(object, metaclass=abc.ABCMeta):
|
||||
def bulk_delete_node_history_records(self, node_id, limit):
|
||||
"""Utility method to bulk delete node history entries.
|
||||
|
||||
:param entires: A list of node history entriy id's to be
|
||||
:param entries: A list of node history entry id's to be
|
||||
queried for deletion.
|
||||
"""
|
||||
|
||||
|
@ -401,7 +401,7 @@ def _filter_active_conductors(query, interval=None):
|
||||
if interval is None:
|
||||
interval = CONF.conductor.heartbeat_timeout
|
||||
if not utils.is_ironic_using_sqlite() and interval > 0:
|
||||
# Check for greater than zero becaues if the value is zero,
|
||||
# Check for greater than zero because if the value is zero,
|
||||
# then the logic makes no sense.
|
||||
limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
|
||||
query = (query.filter(models.Conductor.online.is_(True))
|
||||
@ -1852,7 +1852,7 @@ class Connection(api.Connection):
|
||||
# a missing table, i.e. database upgrades which will create
|
||||
# the table *and* the field version is 1.0, which means we
|
||||
# are likely about to *create* the table, but first have to
|
||||
# pass the version/compatability checking logic.
|
||||
# pass the version/compatibility checking logic.
|
||||
table_missing_ok = True
|
||||
|
||||
# NOTE(mgagne): Additional safety check to detect old database
|
||||
|
@ -1411,7 +1411,7 @@ class BIOSInterface(BaseInterface):
|
||||
table with the BIOS configuration applied on the node.
|
||||
|
||||
:param task: a TaskManager instance.
|
||||
:param settings: Dictonary containing the BIOS configuration.
|
||||
:param settings: Dictionary containing the BIOS configuration.
|
||||
:raises: UnsupportedDriverExtension, if the node's driver doesn't
|
||||
support BIOS configuration.
|
||||
:raises: InvalidParameterValue, if validation of settings fails.
|
||||
@ -1807,7 +1807,7 @@ class NetworkInterface(BaseInterface):
|
||||
:raises: InvalidParameterValue, if the network interface configuration
|
||||
is invalid.
|
||||
:raises: MissingParameterValue, if some parameters are missing.
|
||||
:returns: a dict holding network configuration information adhearing
|
||||
:returns: a dict holding network configuration information adhering
|
||||
Nova network metadata layout (`network_data.json`).
|
||||
"""
|
||||
return task.node.network_data or {}
|
||||
|
@ -195,7 +195,7 @@ def validate_http_provisioning_configuration(node):
|
||||
'[deploy]http_root': CONF.deploy.http_root,
|
||||
'[deploy]http_image_subdir': CONF.deploy.http_image_subdir
|
||||
}
|
||||
error_msg = _('Node %s failed to validate http provisoning. Some '
|
||||
error_msg = _('Node %s failed to validate http provisioning. Some '
|
||||
'configuration options were missing') % node.uuid
|
||||
deploy_utils.check_for_missing_params(params, error_msg)
|
||||
|
||||
@ -863,7 +863,7 @@ class AgentRescue(base.RescueInterface):
|
||||
|
||||
# NOTE(TheJulia): Revealing that the power is off at any time can
|
||||
# cause external power sync to decide that the node must be off.
|
||||
# This may result in a post-rescued insance being turned off
|
||||
# This may result in a post-rescued instance being turned off
|
||||
# unexpectedly after unrescue.
|
||||
# TODO(TheJulia): Once we have power/state callbacks to nova,
|
||||
# the reset of the power_state can be removed.
|
||||
|
@ -864,7 +864,7 @@ class AgentBaseMixin(object):
|
||||
|
||||
:param task: a TaskManager object containing the node
|
||||
:returns: A list of service step dictionaries, if an error
|
||||
occurs, then an emtpy list is returned.
|
||||
occurs, then an empty list is returned.
|
||||
"""
|
||||
new_priorities = {
|
||||
'erase_devices': CONF.deploy.erase_devices_priority,
|
||||
|
@ -260,7 +260,7 @@ class AgentClient(object):
|
||||
# is already busy.
|
||||
# NOTE(TheJulia): The agent sends upper case A as of
|
||||
# late victoria, but lower case the entire message
|
||||
# for compatability with pre-late victoria agents
|
||||
# for compatibility with pre-late victoria agents
|
||||
# which returns HTTP 409.
|
||||
raise exception.AgentInProgress(node=node.uuid,
|
||||
command=method,
|
||||
|
@ -1,5 +1,5 @@
|
||||
# NOTE(pas-ha) this is to ensure that partition metadata that might be stored
|
||||
# in the start or end of partiton itself also becomes unusable
|
||||
# in the start or end of partition itself also becomes unusable
|
||||
# and does not interfere with future partition scheme if new partitions
|
||||
# happen to fall on the same boundaries where old partitions were.
|
||||
# NOTE(pas-ha) loop_control works with Ansible >= 2.1
|
||||
|
@ -1154,7 +1154,7 @@ def _validate_image_url(node, url, secret=False):
|
||||
|
||||
|
||||
def _cache_and_convert_image(task, instance_info, image_info=None):
|
||||
"""Cache an image locally and covert it to RAW if needed."""
|
||||
"""Cache an image locally and convert it to RAW if needed."""
|
||||
# Ironic cache and serve images from httpboot server
|
||||
force_raw = direct_deploy_should_convert_raw_image(task.node)
|
||||
_, image_path = cache_instance_image(task.context, task.node,
|
||||
|
@ -184,9 +184,9 @@ class DracWSManInspect(base.InspectInterface):
|
||||
'boot_mode': bios_settings["BootMode"].current_value.lower(),
|
||||
'pci_gpu_devices': self._calculate_gpus(video_controllers)}
|
||||
|
||||
capabilties = utils.get_updated_capabilities(current_capabilities,
|
||||
new_capabilities)
|
||||
properties['capabilities'] = capabilties
|
||||
capabilities = utils.get_updated_capabilities(current_capabilities,
|
||||
new_capabilities)
|
||||
properties['capabilities'] = capabilities
|
||||
|
||||
virtual_disks = client.list_virtual_disks()
|
||||
root_disk = self._guess_root_disk(virtual_disks)
|
||||
|
@ -111,7 +111,7 @@ class IloBIOS(base.BIOSInterface):
|
||||
def _execute_post_boot_bios_step(self, task, step):
|
||||
"""Perform operations required after the reboot.
|
||||
|
||||
Caches BIOS settings in the database and clear the flags assocated
|
||||
Caches BIOS settings in the database and clear the flags associated
|
||||
with the clean step post reboot.
|
||||
:param task: a task from TaskManager.
|
||||
:param step: name of the clean step to be performed
|
||||
|
@ -59,7 +59,7 @@ OPTIONAL_PROPERTIES = {
|
||||
'client_port': _("port to be used for iLO operations. Optional."),
|
||||
'client_timeout': _("timeout (in seconds) for iLO operations. Optional."),
|
||||
'ca_file': _("CA certificate file to validate iLO. This "
|
||||
"attibute is deprecated and will be removed in "
|
||||
"attribute is deprecated and will be removed in "
|
||||
"future release. Optional"),
|
||||
'ilo_verify_ca': _("Either a Boolean value, a path to a CA_BUNDLE "
|
||||
"file or directory with certificates of trusted "
|
||||
|
@ -318,7 +318,7 @@ def _download_swift_based_fw_to(self, target_file):
|
||||
:raises: ImageDownloadFailed, on failure to download the original file.
|
||||
"""
|
||||
# set the parsed_url attribute to the newly created tempurl from swift and
|
||||
# delegate the dowloading job to the http_based downloader
|
||||
# delegate the downloading job to the http_based downloader
|
||||
self.parsed_url = urlparse.urlparse(get_swift_url(self.parsed_url))
|
||||
_download_http_based_fw_to(self, target_file)
|
||||
|
||||
|
@ -803,7 +803,7 @@ class IloManagement(base.ManagementInterface):
|
||||
"""Deploy/Clean step hook after SUM based firmware update operation.
|
||||
|
||||
This method is invoked as a post deploy/clean step hook by the Ironic
|
||||
conductor once firmware update operaion is completed. The deploy/clean
|
||||
conductor once firmware update operation is completed. The deploy/clean
|
||||
logs are collected and stored according to the configured storage
|
||||
backend when the node is configured to collect the logs.
|
||||
|
||||
@ -1212,7 +1212,7 @@ class Ilo5Management(IloManagement):
|
||||
"""Clears the certificates provided in the list of files to iLO.
|
||||
|
||||
:param task: a task from TaskManager.
|
||||
:param certificate_files: a list of cerificate files.
|
||||
:param certificate_files: a list of certificate files.
|
||||
:raises: NodeCleaningFailure, on failure to execute of clean step.
|
||||
:raises: InstanceDeployFailure, on failure to execute of deploy step.
|
||||
"""
|
||||
|
@ -284,7 +284,7 @@ def _can_get_server_post_state(node):
|
||||
Returns True if the POST state of the server can be retrieved.
|
||||
It cannot be retrieved for older ProLiant models.
|
||||
:param node: The node.
|
||||
:returns: True if POST state can be retrieved, else Flase.
|
||||
:returns: True if POST state can be retrieved, else False.
|
||||
:raises: IloOperationError on an error from IloClient library.
|
||||
"""
|
||||
try:
|
||||
|
@ -42,7 +42,7 @@ class LocalLinkConnectionHook(base.InspectionHook):
|
||||
except binascii.Error:
|
||||
LOG.warning('TLV value for TLV type %d is not in correct '
|
||||
'format. Ensure that the TLV value is in '
|
||||
'hexidecimal format when sent to ironic. Node: %s',
|
||||
'hexadecimal format when sent to ironic. Node: %s',
|
||||
tlv_type, node_uuid)
|
||||
return
|
||||
|
||||
|
@ -246,7 +246,7 @@ def get_autoneg_cap(pmd):
|
||||
Dependent (PMD) capability bits.
|
||||
|
||||
:param pmd: PMD bits
|
||||
:return: Sorted ist containing capability strings
|
||||
:return: Sorted list containing capability strings
|
||||
"""
|
||||
caps_set = set()
|
||||
|
||||
|
@ -524,7 +524,7 @@ def _ipmitool_timing_args():
|
||||
|
||||
|
||||
def choose_cipher_suite(actual_cipher_suite):
|
||||
"""Gives the possible next avaible cipher suite version.
|
||||
"""Gives the possible next available cipher suite version.
|
||||
|
||||
Based on CONF.ipmi.cipher_suite_versions and the last cipher suite version
|
||||
used that failed. This function is only called if the node doesn't have
|
||||
@ -1209,8 +1209,8 @@ class IPMIManagement(base.ManagementInterface):
|
||||
boot_mode = boot_mode_utils.get_boot_mode(task.node)
|
||||
if boot_mode == 'uefi':
|
||||
# Long story short: UEFI was added to IPMI after the final spec
|
||||
# release occured. This means BMCs may actually NEED to be
|
||||
# explicitly told if the boot is persistant because the
|
||||
# release occurred. This means BMCs may actually NEED to be
|
||||
# explicitly told if the boot is persistent because the
|
||||
# BMC may return a value which is explicitly parsed as
|
||||
# no change, BUT the BMC may treat that as operational default.
|
||||
efi_persistence = '0xe0' if persistent else '0xa0'
|
||||
@ -1340,7 +1340,7 @@ class IPMIManagement(base.ManagementInterface):
|
||||
"""
|
||||
driver_info = _parse_driver_info(task.node)
|
||||
# with '-v' option, we can get the entire sensor data including the
|
||||
# extended sensor informations
|
||||
# extended sensor information
|
||||
cmd = "sdr -v"
|
||||
try:
|
||||
out, err = _exec_ipmitool(
|
||||
|
@ -644,12 +644,12 @@ def within_version_ranges(node, version_ranges):
|
||||
|
||||
v_range = version_ranges[os_num]
|
||||
|
||||
# An OS number with no ranges setted means no need to check
|
||||
# An OS number with no ranges set means no need to check
|
||||
# specific version, all the version under this OS number is valid.
|
||||
if not v_range:
|
||||
return True
|
||||
|
||||
# Specific range is setted, check if the node's
|
||||
# Specific range is set, check if the node's
|
||||
# firmware version is within it.
|
||||
min_ver = v_range.get('min')
|
||||
upper_ver = v_range.get('upper')
|
||||
|
@ -72,7 +72,7 @@ def _is_expected_power_state(target_state, boot_status_value):
|
||||
|
||||
:param target_state: Target power state.
|
||||
:param boot_status_value: SNMP BOOT_STATUS_VALUE.
|
||||
:returns: True if expected power state, otherwise Flase.
|
||||
:returns: True if expected power state, otherwise False.
|
||||
"""
|
||||
if (target_state == states.SOFT_POWER_OFF
|
||||
and boot_status_value in (BOOT_STATUS_VALUE['unknown'],
|
||||
|
@ -57,7 +57,7 @@ class IRMCVendorPassthru(base.VendorInterface):
|
||||
def cache_irmc_firmware_version(self, task, **kwargs):
|
||||
"""Fetch and save iRMC firmware version.
|
||||
|
||||
This method connects to iRMC and fetch iRMC firmware verison.
|
||||
This method connects to iRMC and fetch iRMC firmware version.
|
||||
If fetched firmware version is not cached in or is different from
|
||||
one in driver_internal_info/irmc_fw_version, store fetched version
|
||||
in driver_internal_info/irmc_fw_version.
|
||||
|
@ -607,7 +607,7 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
|
||||
:raises: InvalidParameterValue, if the network interface configuration
|
||||
is invalid.
|
||||
:raises: MissingParameterValue, if some parameters are missing.
|
||||
:returns: a dict holding network configuration information adhearing
|
||||
:returns: a dict holding network configuration information adhering
|
||||
Nova network metadata layout (`network_data.json`).
|
||||
"""
|
||||
# NOTE(etingof): static network data takes precedence
|
||||
|
@ -202,7 +202,7 @@ class PXEBaseMixin(object):
|
||||
pxe_options = pxe_utils.build_pxe_config_options(
|
||||
task, pxe_info, ipxe_enabled=self.ipxe_enabled,
|
||||
ramdisk_params=ramdisk_params)
|
||||
# TODO(dtantsur): backwards compability hack, remove in the V release
|
||||
# TODO(dtantsur): backwards compatibility hack, remove in the V release
|
||||
if ramdisk_params.get("ipa-api-url"):
|
||||
pxe_options["ipa-api-url"] = ramdisk_params["ipa-api-url"]
|
||||
|
||||
|
@ -63,7 +63,7 @@ class RamdiskDeploy(agent_base.AgentBaseMixin, agent_base.HeartbeatMixin,
|
||||
{'node': task.node,
|
||||
'drv': task.node.get_interface('boot')})
|
||||
manager_utils.node_power_action(task, states.POWER_OFF)
|
||||
# Tenant neworks must enable connectivity to the boot
|
||||
# Tenant networks must enable connectivity to the boot
|
||||
# location, as reboot() can otherwise be very problematic.
|
||||
# IDEA(TheJulia): Maybe a "trusted environment" mode flag
|
||||
# that we otherwise fail validation on for drivers that
|
||||
|
@ -78,7 +78,7 @@ class RedfishFirmware(base.FirmwareInterface):
|
||||
node_id = task.node.id
|
||||
settings = []
|
||||
# NOTE(iurygregory): currently we will only retrieve BIOS and BMC
|
||||
# firmware information trough the redfish system and manager.
|
||||
# firmware information through the redfish system and manager.
|
||||
|
||||
system = redfish_utils.get_system(task.node)
|
||||
|
||||
|
@ -56,7 +56,7 @@ def parse_driver_info(node):
|
||||
Parses the driver_info of the node, reads default values
|
||||
and returns a dict containing the combination of both.
|
||||
|
||||
:param node: an ironic node object to get informatin from.
|
||||
:param node: an ironic node object to get information from.
|
||||
:returns: a dict containing information parsed from driver_info.
|
||||
:raises: InvalidParameterValue if some required information
|
||||
is missing on the node or inputs is invalid.
|
||||
@ -132,7 +132,7 @@ def get_xclarity_client(node):
|
||||
def get_server_hardware_id(node):
|
||||
"""Validates node configuration and returns xclarity hardware id.
|
||||
|
||||
Validates whether node configutation is consistent with XClarity and
|
||||
Validates whether node configuration is consistent with XClarity and
|
||||
returns the XClarity Hardware ID for a specific node.
|
||||
:param node: node object to get information from
|
||||
:returns: the XClarity Hardware ID for a specific node
|
||||
|
@ -160,7 +160,7 @@ class Conductor(base.IronicObject, object_base.VersionedObjectDictCompat):
|
||||
"""Register hardware interfaces with the conductor.
|
||||
|
||||
:param interfaces: List of interface to register, each entry should
|
||||
be a dictionary conaining "hardware_type", "interface_type",
|
||||
be a dictionary containing "hardware_type", "interface_type",
|
||||
"interface_name" and "default", e.g.
|
||||
{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
|
||||
'interface_name': 'direct', 'default': True}
|
||||
|
@ -1044,7 +1044,7 @@ class TestPost(test_api_base.BaseApiTest):
|
||||
self.assertEqual(http_client.FORBIDDEN, response.status_int)
|
||||
expected_faultstring = ('Cannot create allocation with an owner '
|
||||
'Project ID value 12345 not matching the '
|
||||
'requestor Project ID 0987. Policy '
|
||||
'requester Project ID 0987. Policy '
|
||||
'baremetal:allocation:create_restricted '
|
||||
'is required for this capability.')
|
||||
error_body = json.loads(response.json['error_message'])
|
||||
|
@ -944,14 +944,14 @@ class TestListNodes(test_api_base.BaseApiTest):
|
||||
mock_authorize.side_effect = mock_authorize_function
|
||||
|
||||
instance_uuid = '6eccd391-961c-4da5-b3c5-e2fa5cfbbd9d'
|
||||
requestor_uuid = '46c0bf8a-846d-49a5-9724-5a61a5efa6bf'
|
||||
requester_uuid = '46c0bf8a-846d-49a5-9724-5a61a5efa6bf'
|
||||
obj_utils.create_test_node(
|
||||
self.context,
|
||||
owner='97879042-c0bf-4216-882a-66a7cbf2bd74',
|
||||
instance_uuid=instance_uuid)
|
||||
data = self.get_json(
|
||||
'/nodes/detail?instance_uuid=%s' % instance_uuid,
|
||||
headers={'X-Project-ID': requestor_uuid,
|
||||
headers={'X-Project-ID': requester_uuid,
|
||||
api_base.Version.string: str(api_v1.max_version())})
|
||||
self.assertEqual(0, len(data['nodes']))
|
||||
|
||||
@ -964,14 +964,14 @@ class TestListNodes(test_api_base.BaseApiTest):
|
||||
mock_authorize.side_effect = mock_authorize_function
|
||||
|
||||
instance_uuid = '6eccd391-961c-4da5-b3c5-e2fa5cfbbd9d'
|
||||
requestor_uuid = '46c0bf8a-846d-49a5-9724-5a61a5efa6bf'
|
||||
requester_uuid = '46c0bf8a-846d-49a5-9724-5a61a5efa6bf'
|
||||
node = obj_utils.create_test_node(
|
||||
self.context,
|
||||
owner=requestor_uuid,
|
||||
owner=requester_uuid,
|
||||
instance_uuid=instance_uuid)
|
||||
data = self.get_json(
|
||||
'/nodes/detail?instance_uuid=%s' % instance_uuid,
|
||||
headers={'X-Project-ID': requestor_uuid,
|
||||
headers={'X-Project-ID': requester_uuid,
|
||||
api_base.Version.string: str(api_v1.max_version())})
|
||||
self.assertEqual(1, len(data['nodes']))
|
||||
# Assert we did get the node and it matched.
|
||||
|
@ -165,7 +165,7 @@ class TestACLBase(base.BaseApiTest):
|
||||
# Example: PATCH /v1/nodes/<uuid> as a reader.
|
||||
# 404 - Trying to access something where we don't have permissions
|
||||
# in a project scope. This is particularly true where implied
|
||||
# permissions or assocation exists. Ports are attempted to be
|
||||
# permissions or association exists. Ports are attempted to be
|
||||
# accessed when the underlying node is inaccessible as owner
|
||||
# nor node matches.
|
||||
# Example: GET /v1/portgroups or /v1/nodes/<uuid>/ports
|
||||
@ -198,7 +198,7 @@ class TestACLBase(base.BaseApiTest):
|
||||
# json ends up being null in json or None.
|
||||
self.assertIsNone(response.json[k])
|
||||
elif str(v) == "{}":
|
||||
# Special match for signifying a dictonary.
|
||||
# Special match for signifying a dictionary.
|
||||
self.assertEqual({}, response.json[k])
|
||||
elif isinstance(v, dict):
|
||||
# The value from the YAML can be a dictionary,
|
||||
@ -347,7 +347,7 @@ class TestRBACScoped(TestRBACModelBeforeScopes):
|
||||
# while we also enable the new ones in another test class with
|
||||
# the appropriate scope friendly chagnges. In other words, two
|
||||
# test changes will be needed for each which should also reduce
|
||||
# risk of accidential policy changes. It may just be Julia being
|
||||
# risk of accidental policy changes. It may just be Julia being
|
||||
# super risk-adverse, just let her roll with it and we will delete
|
||||
# this class later.
|
||||
# NOTE(TheJulia): This test class runs with test_rbac_legacy.yaml!
|
||||
|
@ -1481,7 +1481,7 @@ volume_targets_get_observer:
|
||||
deprecated: true
|
||||
|
||||
# NOTE(TheJulia): Because we can't seem to get the uuid
|
||||
# to load from an existing uuid, since we're not subsituting
|
||||
# to load from an existing uuid, since we're not substituting
|
||||
# it, this will return with 400 due to the ID not matching.
|
||||
volume_targets_post_admin:
|
||||
path: '/v1/volume/targets'
|
||||
|
@ -24,7 +24,7 @@
|
||||
# they can determine that something is special, something is different,
|
||||
# and from there try to determine *what* it is. The key in their case
|
||||
# is the ID values, but they don't know that from the outside.
|
||||
# This is also why thid party admins should get 200s and empty lists,
|
||||
# This is also why third party admins should get 200s and empty lists,
|
||||
# again the database query should be filtered. Third party admin,
|
||||
# in essence serves as the primary negative test.
|
||||
#
|
||||
@ -581,7 +581,7 @@ owner_manager_cannot_change_owner:
|
||||
# This is not an explicitly restricted item, it falls
|
||||
# to generalized update capability, which oddly makes
|
||||
# a lot of sense in this case. It is a flag to prevent
|
||||
# accidential erasure/removal of the node.
|
||||
# accidental erasure/removal of the node.
|
||||
|
||||
lessee_member_can_set_protected:
|
||||
path: '/v1/nodes/{lessee_node_ident}'
|
||||
@ -3531,7 +3531,7 @@ third_party_admin_cannot_create_allocation_with_candidates_not_owned:
|
||||
assert_status: 400
|
||||
|
||||
owner_admin_can_create_allocation_with_their_uuid:
|
||||
# NOTE(TheJulia): Owner/Lessee are equivelent in
|
||||
# NOTE(TheJulia): Owner/Lessee are equivalent in
|
||||
# this context, so testing only one is fine.
|
||||
path: '/v1/allocations'
|
||||
method: post
|
||||
|
@ -1588,7 +1588,7 @@ volume_targets_get_service:
|
||||
assert_status: 200
|
||||
|
||||
# NOTE(TheJulia): Because we can't seem to get the uuid
|
||||
# to load from an existing uuid, since we're not subsituting
|
||||
# to load from an existing uuid, since we're not substituting
|
||||
# it, this will return with 400 due to the ID not matching.
|
||||
volume_targets_post_admin:
|
||||
path: '/v1/volume/targets'
|
||||
|
@ -29,45 +29,47 @@ CONF = cfg.CONF
|
||||
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
|
||||
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
|
||||
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
|
||||
CONFIG_DRIVE = ('H4sICDw0S2AC/3RtcGhYdnFvdADt3X1vFMcdAOBZkwbTIquiL6oiJ9kkkJBKN'
|
||||
'mcTkTiVKl3Oa3uTe9PdOYK/0AmOvNqO4IJatZWav5pK/UztV8kXiPoR2tm98x'
|
||||
's+fCQQMPA8i71zs7Mz4/VJvx0vMxcCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAJDUViuVpSTU8+bm1fT+buxs3/rsk4Xl+x3fre8/h3bHtBv/FV9h'
|
||||
'dja8P8p6/9f7h39bfHs9zI9ezYfZYjcb/nzqbDI//93M7vnpE7bTH36a7nw12'
|
||||
'L4z7N/4Ih0O+lvp82Q9a+bdVt6ormdpTKQrV65ULm2sddO1vJ51r3V7WSOtdb'
|
||||
'Jqr9VJL9beTpdWVi6n2eK11mZzfbVaz3Yz311YrlSupB8utrNqp9tqXvpwsVv'
|
||||
'byOvxXblelikOF2XeTWurnY/yXtrLqo3H/uMuV5aXKpeXlitLy+8tv1epfHck'
|
||||
'o3KPcKTEk3/T8mQJOpwYM+P4H+ohD82wGa6GdOJ2I+yE7XArfBY+CQth+cjxe'
|
||||
'+L/hUvZA8f/5iir+bv9wy+N4v+42vR+8f8+fX18207oh2H4tEx9FQbxCt2Jr/'
|
||||
'vxan0R84Yxpx+2nngvf7ptPWTx15eHbmjF741QLXPScU4aVsKVuFXC9bAR1mJ'
|
||||
'eGr/n8b2WxfS1+NWLqUbMrYVOTFXj61ZMpeFizHk77pdiDSvhckxlYTGe0Yrv'
|
||||
'0GZsYzWWrZctTd8eXSHxH/GfZ8j/duM/AAAA8MxKymfsxfj/THi5TO09zg6nw'
|
||||
'6sxZybc2NkeDraH4cXwSvn6y/5wcGfo2gEAAMDTM/4Pxf+vT4rxf/RySA6O/6'
|
||||
'NXw8z++D96JcwY/wMAAMDTNv5Px38FOBdeG6WOzGSbC2+E4rn/eA7gsDw6PBt'
|
||||
'eH+V+Wc6BG5TlAQAAgBM5/g/F2idJMf6PXismABwd/0dvFBMBDo//Q7FEz4zx'
|
||||
'PwAAAJx0305dY7/bPp38+7+h0/lZ8k376vlkq1qUq26dGp136t4ae2svJXPjS'
|
||||
'g7vatl8cn5U6Pxu6e/Hu1vT+pE8gg6Ev5ZrHIRinsPEVs7sTX4oWvtnszF3YD'
|
||||
'2Eg22/MKrmhR/QNgCcHLemRMTkaOD/EbHv8UT3P5XrFYVizuLEVk6PJzKOY/v'
|
||||
'ZZHdlo4PtzoyqmPkB7d4t10UKxdzIie2+OJ4wOW73F8l4BaWHbBYAHiL+Hx+7'
|
||||
'JsT/HxGqpt5lJI/iLuPbcGFU5sJuF/dDZdHKL7cGw/71m/1hf/HzOzvbf1jaj'
|
||||
'ci/SkJxaGHvUNGR898UVXxzfvzZCMmDd+Tv4c1RkTfnRvu5w/04+/Wdwe1RP/'
|
||||
'b7MJeEveyHaz78K7w1KvPW5Otw7u5g++bO7UlX4jdJuPfgQ3YGgBMa/48fMz9'
|
||||
'N8X8YLo7KXJwd7WcPx73TxSeyxZA7jnVnklBkiG8APH+mf8bu1BLJO+XKAaGY'
|
||||
'PTCxxLkJH44LADzJ+H987H6Q+F8p1wcKxRzBiSXmDk8cDIvlykFl4xPLnzWlE'
|
||||
'AB+4vh/fCxOpt8hJH+c8tx9PmzFWF6M/BfCzTKy9+M9wOcxuhd3Be9MeVp+Ln'
|
||||
'wdSw7C7XB97+wPpjzhTsPd8l7jZmzh4Hn7rQLA8x3/jx+7P0j8//2U5+6zoTL'
|
||||
'eAICTIOt8n/y894+k08nb15dWVpaqvY0s7bRqH6WdfHU9S/NmL+vUNqrNmG53'
|
||||
'Wr1WrVUvEh/nq1k37W62261OL11rddJ2q5tfTdfyepZ2r3V7WSPtZo1qs5fXu'
|
||||
'u16Vu1maa3V7FVrvXQ179bS9uYH9by7kXXKk7vtrJav5bVqL281025rs1PLFt'
|
||||
'NYQ3agYGwyVreWF8lm7ETeqHaupR+36puNLI3dqcUfotcaVbjbVt6MrxpltYt'
|
||||
'+3QBQ+svfXAMAeN4U69CkexPPXQ8AMP4HAJ5F24PhgpE/AAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAn3f8BeXAIEgD4BQA=')
|
||||
CONFIG_DRIVE = (
|
||||
'H4sICDw0S2AC/3RtcGhYdnFvdADt3X1vFMcdAOBZkwbTIquiL6oiJ9kkkJBKN'
|
||||
'mcTkTiVKl3Oa3uTe9PdOYK/0AmOvNqO4IJatZWav5pK/UztV8kXiPoR2tm98x'
|
||||
's+fCQQMPA8i71zs7Mz4/VJvx0vMxcCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAJDUViuVpSTU8+bm1fT+buxs3/rsk4Xl+x3fre8/h3bHtBv/FV9h'
|
||||
'dja8P8p6/9f7h39bfHs9zI9ezYfZYjcb/nzqbDI//93M7vnpE7bTH36a7nw12'
|
||||
'L4z7N/4Ih0O+lvp82Q9a+bdVt6ormdpTKQrV65ULm2sddO1vJ51r3V7WSOtdb'
|
||||
'Jqr9VJL9beTpdWVi6n2eK11mZzfbVaz3Yz311YrlSupB8utrNqp9tqXvpwsVv'
|
||||
'byOvxXblelikOF2XeTWurnY/yXtrLqo3H/uMuV5aXKpeXlitLy+8tv1epfHck'
|
||||
'o3KPcKTEk3/T8mQJOpwYM+P4H+ohD82wGa6GdOJ2I+yE7XArfBY+CQth+cjxe'
|
||||
'+L/hUvZA8f/5iir+bv9wy+N4v+42vR+8f8+fX18207oh2H4tEx9FQbxCt2Jr/'
|
||||
'vxan0R84Yxpx+2nngvf7ptPWTx15eHbmjF741QLXPScU4aVsKVuFXC9bAR1mJ'
|
||||
'eGr/n8b2WxfS1+NWLqUbMrYVOTFXj61ZMpeFizHk77pdiDSvhckxlYTGe0Yrv'
|
||||
'0GZsYzWWrZctTd8eXSHxH/GfZ8j/duM/AAAA8MxKymfsxfj/THi5TO09zg6nw'
|
||||
'6sxZybc2NkeDraH4cXwSvn6y/5wcGfo2gEAAMDTM/4Pxf+vT4rxf/RySA6O/6'
|
||||
'NXw8z++D96JcwY/wMAAMDTNv5Px38FOBdeG6WOzGSbC2+E4rn/eA7gsDw6PBt'
|
||||
'eH+V+Wc6BG5TlAQAAgBM5/g/F2idJMf6PXismABwd/0dvFBMBDo//Q7FEz4zx'
|
||||
'PwAAAJx0305dY7/bPp38+7+h0/lZ8k376vlkq1qUq26dGp136t4ae2svJXPjS'
|
||||
'g7vatl8cn5U6Pxu6e/Hu1vT+pE8gg6Ev5ZrHIRinsPEVs7sTX4oWvtnszF3YD'
|
||||
'2Eg22/MKrmhR/QNgCcHLemRMTkaOD/EbHv8UT3P5XrFYVizuLEVk6PJzKOY/v'
|
||||
'ZZHdlo4PtzoyqmPkB7d4t10UKxdzIie2+OJ4wOW73F8l4BaWHbBYAHiL+Hx+7'
|
||||
'JsT/HxGqpt5lJI/iLuPbcGFU5sJuF/dDZdHKL7cGw/71m/1hf/HzOzvbf1jaj'
|
||||
'ci/SkJxaGHvUNGR898UVXxzfvzZCMmDd+Tv4c1RkTfnRvu5w/04+/Wdwe1RP/'
|
||||
'b7MJeEveyHaz78K7w1KvPW5Otw7u5g++bO7UlX4jdJuPfgQ3YGgBMa/48fMz9'
|
||||
'N8X8YLo7KXJwd7WcPx73TxSeyxZA7jnVnklBkiG8APH+mf8bu1BLJO+XKAaGY'
|
||||
'PTCxxLkJH44LADzJ+H987H6Q+F8p1wcKxRzBiSXmDk8cDIvlykFl4xPLnzWlE'
|
||||
'AB+4vh/fCxOpt8hJH+c8tx9PmzFWF6M/BfCzTKy9+M9wOcxuhd3Be9MeVp+Ln'
|
||||
'wdSw7C7XB97+wPpjzhTsPd8l7jZmzh4Hn7rQLA8x3/jx+7P0j8//2U5+6zoTL'
|
||||
'eAICTIOt8n/y894+k08nb15dWVpaqvY0s7bRqH6WdfHU9S/NmL+vUNqrNmG53'
|
||||
'Wr1WrVUvEh/nq1k37W62261OL11rddJ2q5tfTdfyepZ2r3V7WSPtZo1qs5fXu'
|
||||
'u16Vu1maa3V7FVrvXQ179bS9uYH9by7kXXKk7vtrJav5bVqL281025rs1PLFt'
|
||||
'NYQ3agYGwyVreWF8lm7ETeqHaupR+36puNLI3dqcUfotcaVbjbVt6MrxpltYt'
|
||||
'+3QBQ+svfXAMAeN4U69CkexPPXQ8AMP4HAJ5F24PhgpE/AAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
|
||||
'AAAAAAAAAAAAAAAAAn3f8BeXAIEgD4BQA='
|
||||
)
|
||||
|
||||
|
||||
@mock.patch.object(ipxe.iPXEBoot, '__init__', lambda self: None)
|
||||
|
@ -1477,7 +1477,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
|
||||
image_info = pxe_utils.get_instance_image_info(
|
||||
task, ipxe_enabled=False)
|
||||
self.assertEqual(expected_info, image_info)
|
||||
# In the absense of kickstart template in both instance_info and
|
||||
# In the absence of kickstart template in both instance_info and
|
||||
# image default kickstart template is used
|
||||
self.assertEqual('file://' + CONF.anaconda.default_ks_template,
|
||||
image_info['ks_template'][0])
|
||||
@ -1536,7 +1536,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
|
||||
image_info = pxe_utils.get_instance_image_info(
|
||||
task, ipxe_enabled=False)
|
||||
self.assertEqual(expected_info, image_info)
|
||||
# In the absense of kickstart template in both instance_info and
|
||||
# In the absence of kickstart template in both instance_info and
|
||||
# image default kickstart template is used
|
||||
self.assertEqual('file://' + CONF.anaconda.default_ks_template,
|
||||
image_info['ks_template'][0])
|
||||
|
@ -129,7 +129,7 @@ class ServiceSetUpMixin(object):
|
||||
self.hostname = 'test-host'
|
||||
# Relies upon the default number of "NodeLocked" retries as
|
||||
# in unit testing, sqllite is not operated in a transactional
|
||||
# way and utilizes asynchonous IO. Locking, in particular, can
|
||||
# way and utilizes asynchronous IO. Locking, in particular, can
|
||||
# detect this, and it can cause some false or delayed inpressions
|
||||
# of lock status, causing lock failures.
|
||||
self.config(node_locked_retry_attempts=3, group='conductor')
|
||||
|
@ -306,7 +306,7 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
|
||||
def test_start_dbapi_single_call(self, mock_dbapi):
|
||||
self._start_service()
|
||||
# NOTE(TheJulia): This seems like it should only be 1, but
|
||||
# the hash ring initailization pulls it's own database connection
|
||||
# the hash ring initialization pulls it's own database connection
|
||||
# instance, which is likely a good thing, thus this is 2 instead of
|
||||
# 3 without reuse of the database connection.
|
||||
self.assertEqual(2, mock_dbapi.call_count)
|
||||
|
@ -1356,7 +1356,7 @@ class DoNodeCleanTestChildNodes(db_base.DbTestCase):
|
||||
child_node1.refresh()
|
||||
child_node2.refresh()
|
||||
|
||||
# Confirm the objects *did* recieve locks.
|
||||
# Confirm the objects *did* receive locks.
|
||||
self.assertNotEqual(child1_updated_at, child_node1.updated_at)
|
||||
self.assertNotEqual(child2_updated_at, child_node2.updated_at)
|
||||
|
||||
@ -1439,7 +1439,7 @@ class DoNodeCleanTestChildNodes(db_base.DbTestCase):
|
||||
child_node1.refresh()
|
||||
child_node2.refresh()
|
||||
|
||||
# Confirm the objects *did* recieve locks.
|
||||
# Confirm the objects *did* receive locks.
|
||||
self.assertNotEqual(child1_updated_at, child_node1.updated_at)
|
||||
self.assertIsNone(child_node2.updated_at)
|
||||
|
||||
|
@ -637,7 +637,7 @@ class ChangeNodeBootModeTestCase(mgr_utils.ServiceSetUpMixin,
|
||||
def test_change_node_boot_mode_exception_getting_current(self,
|
||||
get_boot_mock,
|
||||
set_boot_mock):
|
||||
# Test change_node_boot_mode smooth opertion when get_boot_mode mode
|
||||
# Test change_node_boot_mode smooth operation when get_boot_mode mode
|
||||
# raises an exception
|
||||
initial_state = boot_modes.LEGACY_BIOS
|
||||
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
|
||||
@ -854,7 +854,7 @@ class ChangeNodeSecureBootTestCase(mgr_utils.ServiceSetUpMixin,
|
||||
def test_change_node_secure_boot_exception_getting_current(self,
|
||||
get_boot_mock,
|
||||
set_boot_mock):
|
||||
# Test change_node_secure_boot smooth opertion when
|
||||
# Test change_node_secure_boot smooth operation when
|
||||
# get_secure_boot_state raises an exception
|
||||
initial_state = False
|
||||
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
|
||||
@ -1010,7 +1010,7 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
|
||||
|
||||
def test_update_node_retired_invalid_state(self):
|
||||
# NOTE(arne_wiebalck): nodes in available cannot be 'retired'.
|
||||
# This is to ensure backwards comaptibility.
|
||||
# This is to ensure backwards compatibility.
|
||||
node = obj_utils.create_test_node(self.context,
|
||||
provision_state='available')
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user