From 4b8026714fc538e8a9e8d6dfe6a13b5e8851119e Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Wed, 1 Feb 2017 02:53:18 +0000 Subject: [PATCH] Glare-ectomy Glare became a separate project during Newton. The code was copied out of the Glance tree into its own repository, but the original copy remained in the Glance tree. It is no longer being maintained, and that has begun to cause some problems, for example, blocking a recent stevedore upper constraints change; see I141b17f9dd2acebe2b23f8fc93206e23bc70b568. This patch removes the Glare code from the Glance repository. It includes Alembic database migration scripts, in both the all-in-one and expand-migrate-contract (E-M-C) formats. It also includes release notes. Implements: blueprint glare-ectomy Change-Id: I3026ca6287a65ab5287bf3843f2a9d756ce15139 --- doc/source/conf.py | 2 - doc/source/sample-configuration.rst | 9 - etc/glance-glare.conf | 2340 ----------------- etc/oslo-config-generator/glance-glare.conf | 10 - glance/api/glare/__init__.py | 0 glance/api/glare/v0_1/__init__.py | 0 glance/api/glare/v0_1/glare.py | 922 ------- glance/api/glare/v0_1/router.py | 98 - glance/api/glare/versions.py | 93 - glance/api/middleware/version_negotiation.py | 13 - glance/api/versions.py | 11 +- glance/cmd/glare.py | 87 - glance/common/exception.py | 95 - glance/common/glare/__init__.py | 0 glance/common/glare/declarative.py | 743 ------ glance/common/glare/definitions.py | 571 ---- glance/common/glare/loader.py | 190 -- glance/common/glare/serialization.py | 328 --- glance/common/semver_db.py | 175 -- glance/contrib/__init__.py | 0 glance/contrib/plugins/__init__.py | 0 .../plugins/artifacts_sample/__init__.py | 5 - .../contrib/plugins/artifacts_sample/base.py | 29 - .../plugins/artifacts_sample/setup.cfg | 25 - .../contrib/plugins/artifacts_sample/setup.py | 20 - .../plugins/artifacts_sample/v1/__init__.py | 0 .../plugins/artifacts_sample/v2/__init__.py | 0 .../plugins/image_artifact/__init__.py | 0 .../plugins/image_artifact/requirements.txt | 1 - .../contrib/plugins/image_artifact/setup.cfg | 25 - .../plugins/image_artifact/v1/__init__.py | 0 .../plugins/image_artifact/v1/image.py | 38 - .../plugins/image_artifact/v1_1/__init__.py | 0 .../plugins/image_artifact/v1_1/image.py | 27 - .../plugins/image_artifact/v2/__init__.py | 0 .../plugins/image_artifact/v2/image.py | 83 - .../image_artifact/version_selector.py | 19 - glance/db/__init__.py | 95 - glance/db/migration.py | 2 +- glance/db/registry/api.py | 51 - glance/db/simple/api.py | 97 - .../data_migrations/pike_migrate01_empty.py} | 19 +- .../db/sqlalchemy/alembic_migrations/env.py | 3 - .../versions/pike01_drop_artifacts_tables.py | 41 + .../pike_contract01_drop_artifacts_tables.py | 41 + .../versions/pike_expand01_empty.py} | 22 +- glance/db/sqlalchemy/api.py | 57 - glance/db/sqlalchemy/glare.py | 784 ------ glance/db/sqlalchemy/models_glare.py | 337 --- glance/glare/__init__.py | 46 - glance/glare/dependency.py | 126 - glance/glare/domain/__init__.py | 69 - glance/glare/domain/proxy.py | 200 -- glance/glare/gateway.py | 54 - glance/glare/location.py | 198 -- glance/glare/updater.py | 205 -- glance/opts.py | 17 - glance/tests/functional/db/base_glare.py | 907 ------- .../functional/db/migrations/test_pike01.py | 54 + .../db/migrations/test_pike_contract01.py | 50 + .../db/migrations/test_pike_expand01.py | 47 + .../db/migrations/test_pike_migrate01.py} | 17 +- glance/tests/functional/db/test_migrations.py | 3 - glance/tests/functional/db/test_sqlalchemy.py | 15 - glance/tests/functional/glare/__init__.py | 0 glance/tests/functional/glare/test_glare.py | 
2016 -------------- glance/tests/unit/common/test_semver.py | 77 - glance/tests/unit/test_domain.py | 21 - glance/tests/unit/test_glare_plugin_loader.py | 169 -- .../test_glare_type_definition_framework.py | 1128 -------- glance/tests/unit/test_store_glare.py | 71 - .../notes/glare-ectomy-72a1f80f306f2e3b.yaml | 41 + requirements.txt | 3 - 73 files changed, 313 insertions(+), 12729 deletions(-) delete mode 100644 etc/glance-glare.conf delete mode 100644 etc/oslo-config-generator/glance-glare.conf delete mode 100644 glance/api/glare/__init__.py delete mode 100644 glance/api/glare/v0_1/__init__.py delete mode 100644 glance/api/glare/v0_1/glare.py delete mode 100644 glance/api/glare/v0_1/router.py delete mode 100644 glance/api/glare/versions.py delete mode 100644 glance/cmd/glare.py delete mode 100644 glance/common/glare/__init__.py delete mode 100644 glance/common/glare/declarative.py delete mode 100644 glance/common/glare/definitions.py delete mode 100644 glance/common/glare/loader.py delete mode 100644 glance/common/glare/serialization.py delete mode 100644 glance/common/semver_db.py delete mode 100644 glance/contrib/__init__.py delete mode 100644 glance/contrib/plugins/__init__.py delete mode 100644 glance/contrib/plugins/artifacts_sample/__init__.py delete mode 100644 glance/contrib/plugins/artifacts_sample/base.py delete mode 100644 glance/contrib/plugins/artifacts_sample/setup.cfg delete mode 100644 glance/contrib/plugins/artifacts_sample/setup.py delete mode 100644 glance/contrib/plugins/artifacts_sample/v1/__init__.py delete mode 100644 glance/contrib/plugins/artifacts_sample/v2/__init__.py delete mode 100644 glance/contrib/plugins/image_artifact/__init__.py delete mode 100644 glance/contrib/plugins/image_artifact/requirements.txt delete mode 100644 glance/contrib/plugins/image_artifact/setup.cfg delete mode 100644 glance/contrib/plugins/image_artifact/v1/__init__.py delete mode 100644 glance/contrib/plugins/image_artifact/v1/image.py delete mode 100644 glance/contrib/plugins/image_artifact/v1_1/__init__.py delete mode 100644 glance/contrib/plugins/image_artifact/v1_1/image.py delete mode 100644 glance/contrib/plugins/image_artifact/v2/__init__.py delete mode 100644 glance/contrib/plugins/image_artifact/v2/image.py delete mode 100644 glance/contrib/plugins/image_artifact/version_selector.py rename glance/{contrib/plugins/artifacts_sample/v1/artifact.py => db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py} (63%) create mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py create mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py rename glance/{contrib/plugins/artifacts_sample/v2/artifact.py => db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py} (64%) delete mode 100644 glance/db/sqlalchemy/glare.py delete mode 100644 glance/db/sqlalchemy/models_glare.py delete mode 100644 glance/glare/__init__.py delete mode 100644 glance/glare/dependency.py delete mode 100644 glance/glare/domain/__init__.py delete mode 100644 glance/glare/domain/proxy.py delete mode 100644 glance/glare/gateway.py delete mode 100644 glance/glare/location.py delete mode 100644 glance/glare/updater.py delete mode 100644 glance/tests/functional/db/base_glare.py create mode 100644 glance/tests/functional/db/migrations/test_pike01.py create mode 100644 glance/tests/functional/db/migrations/test_pike_contract01.py create mode 100644 glance/tests/functional/db/migrations/test_pike_expand01.py rename 
glance/{contrib/plugins/image_artifact/setup.py => tests/functional/db/migrations/test_pike_migrate01.py} (63%) delete mode 100644 glance/tests/functional/glare/__init__.py delete mode 100644 glance/tests/functional/glare/test_glare.py delete mode 100644 glance/tests/unit/common/test_semver.py delete mode 100644 glance/tests/unit/test_glare_plugin_loader.py delete mode 100644 glance/tests/unit/test_glare_type_definition_framework.py delete mode 100644 glance/tests/unit/test_store_glare.py create mode 100644 releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml diff --git a/doc/source/conf.py b/doc/source/conf.py index c5b373a978..191f5480c6 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,8 +60,6 @@ config_generator_config_file = [ '_static/glance-api'), ('../../etc/oslo-config-generator/glance-cache.conf', '_static/glance-cache'), - ('../../etc/oslo-config-generator/glance-glare.conf', - '_static/glance-glare'), ('../../etc/oslo-config-generator/glance-manage.conf', '_static/glance-manage'), ('../../etc/oslo-config-generator/glance-registry.conf', diff --git a/doc/source/sample-configuration.rst b/doc/source/sample-configuration.rst index 46664a6d26..f6dd5f1881 100644 --- a/doc/source/sample-configuration.rst +++ b/doc/source/sample-configuration.rst @@ -50,12 +50,3 @@ This sample configuration can also be viewed in `glance-cache.conf.sample <_static/glance-cache.conf.sample>`_. .. literalinclude:: _static/glance-cache.conf.sample - - -Sample configuration for Glare ------------------------------- - -This sample configuration can also be viewed in `glance-glare.conf.sample -<_static/glance-glare.conf.sample>`_. - -.. literalinclude:: _static/glance-glare.conf.sample diff --git a/etc/glance-glare.conf b/etc/glance-glare.conf deleted file mode 100644 index 5b83e1dfa4..0000000000 --- a/etc/glance-glare.conf +++ /dev/null @@ -1,2340 +0,0 @@ -[DEFAULT] - -# -# From glance.glare -# - -# -# Set the image owner to tenant or the authenticated user. -# -# Assign a boolean value to determine the owner of an image. When set to -# True, the owner of the image is the tenant. When set to False, the -# owner of the image will be the authenticated user issuing the request. -# Setting it to False makes the image private to the associated user and -# sharing with other users within the same tenant (or "project") -# requires explicit image sharing via image membership. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#owner_is_tenant = true - -# -# Role used to identify an authenticated user as administrator. -# -# Provide a string value representing a Keystone role to identify an -# administrative user. Users with this role will be granted -# administrative privileges. The default value for this option is -# 'admin'. -# -# Possible values: -# * A string value which is a valid Keystone role -# -# Related options: -# * None -# -# (string value) -#admin_role = admin - -# -# Allow limited access to unauthenticated users. -# -# Assign a boolean to determine API access for unauthenticated -# users. When set to False, the API cannot be accessed by -# unauthenticated users. When set to True, unauthenticated users can -# access the API with read-only privileges. This, however, only applies -# when using ContextMiddleware. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#allow_anonymous_access = false - -# -# Limit the request ID length. 
-# -# Provide an integer value to limit the length of the request ID to -# the specified length. The default value is 64. Users can change this -# to any integer value between 0 and 16384, keeping in mind that -# a larger value may flood the logs. -# -# Possible values: -# * Integer value between 0 and 16384 -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#max_request_id_length = 64 - -# -# Public URL endpoint to use for Glance/Glare versions response. -# -# This is the public URL endpoint that will appear in the Glance/Glare -# "versions" response. If no value is specified, the endpoint that is -# displayed in the version's response is that of the host running the -# API service. Change the endpoint to represent the proxy URL if the -# API service is running behind a proxy. If the service is running -# behind a load balancer, add the load balancer's URL for this value. -# -# Possible values: -# * None -# * Proxy URL -# * Load balancer URL -# -# Related options: -# * None -# -# (string value) -#public_endpoint = - -# -# IP address to bind the glance servers to. -# -# Provide an IP address to bind the glance server to. The default -# value is ``0.0.0.0``. -# -# Edit this option to enable the server to listen on one particular -# IP address on the network card. This facilitates selection of a -# particular network interface for the server. -# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# -# Related options: -# * None -# -# (string value) -#bind_host = 0.0.0.0 - -# -# Port number on which the server will listen. -# -# Provide a valid port number to bind the server's socket to. This -# port is then set to identify processes and forward network messages -# that arrive at the server. The default bind_port value for the API -# server is 9292 and for the registry server is 9191. -# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#bind_port = - -# -# Number of Glance worker processes to start. -# -# Provide a non-negative integer value to set the number of child -# process workers to service requests. By default, the number of CPUs -# available is set as the value for ``workers``. -# -# Each worker process is made to listen on the port set in the -# configuration file and contains a greenthread pool of size 1000. -# -# NOTE: Setting the number of workers to zero triggers the creation -# of a single API process with a greenthread pool of size 1000. -# -# Possible values: -# * 0 -# * Positive integer value (typically equal to the number of CPUs) -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#workers = - -# -# Maximum line size of message headers. -# -# Provide an integer value representing a length to limit the size of -# message headers. The default value is 16384. -# -# NOTE: ``max_header_line`` may need to be increased when using large -# tokens (typically those generated by the Keystone v3 API with big -# service catalogs). However, it is to be kept in mind that larger -# values for ``max_header_line`` would flood the logs. -# -# Setting ``max_header_line`` to 0 sets no limit for the line size of -# message headers. -# -# Possible values: -# * 0 -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#max_header_line = 16384 - -# -# Set keep alive option for HTTP over TCP. -# -# Provide a boolean value to determine sending of keep alive packets. 
-# If set to ``False``, the server returns the header -# "Connection: close". If set to ``True``, the server returns a -# "Connection: Keep-Alive" in its responses. This enables retention of -# the same TCP connection for HTTP conversations instead of opening a -# new one with each new request. -# -# This option must be set to ``False`` if the client socket connection -# needs to be closed explicitly after the response is received and -# read successfully by the client. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#http_keepalive = true - -# -# Timeout for client connections' socket operations. -# -# Provide a valid integer value representing time in seconds to set -# the period of wait before an incoming connection can be closed. The -# default value is 900 seconds. -# -# The value zero implies wait forever. -# -# Possible values: -# * Zero -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#client_socket_timeout = 900 - -# -# Set the number of incoming connection requests. -# -# Provide a positive integer value to limit the number of requests in -# the backlog queue. The default queue size is 4096. -# -# An incoming connection to a TCP listener socket is queued before a -# connection can be established with the server. Setting the backlog -# for a TCP socket ensures a limited queue size for incoming traffic. -# -# Possible values: -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#backlog = 4096 - -# -# Set the wait time before a connection recheck. -# -# Provide a positive integer value representing time in seconds which -# is set as the idle wait time before a TCP keep alive packet can be -# sent to the host. The default value is 600 seconds. -# -# Setting ``tcp_keepidle`` helps verify at regular intervals that a -# connection is intact and prevents frequent TCP connection -# reestablishment. -# -# Possible values: -# * Positive integer value representing time in seconds -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#tcp_keepidle = 600 - -# -# Absolute path to the CA file. -# -# Provide a string value representing a valid absolute path to -# the Certificate Authority file to use for client authentication. -# -# A CA file typically contains necessary trusted certificates to -# use for the client authentication. This is essential to ensure -# that a secure connection is established to the server via the -# internet. -# -# Possible values: -# * Valid absolute path to the CA file -# -# Related options: -# * None -# -# (string value) -#ca_file = /etc/ssl/cafile - -# -# Absolute path to the certificate file. -# -# Provide a string value representing a valid absolute path to the -# certificate file which is required to start the API service -# securely. -# -# A certificate file typically is a public key container and includes -# the server's public key, server name, server information and the -# signature which was a result of the verification process using the -# CA certificate. This is required for a secure connection -# establishment. -# -# Possible values: -# * Valid absolute path to the certificate file -# -# Related options: -# * None -# -# (string value) -#cert_file = /etc/ssl/certs - -# -# Absolute path to a private key file. -# -# Provide a string value representing a valid absolute path to a -# private key file which is required to establish the client-server -# connection. 
-# -# Possible values: -# * Absolute path to the private key file -# -# Related options: -# * None -# -# (string value) -#key_file = /etc/ssl/key/key-file.pem - -# -# Default publisher_id for outgoing Glance notifications. -# -# This is the value that the notification driver will use to identify -# messages for events originating from the Glance service. Typically, -# this is the hostname of the instance that generated the message. -# -# Possible values: -# * Any reasonable instance identifier, for example: image.host1 -# -# Related options: -# * None -# -# (string value) -#default_publisher_id = image.localhost - -# -# List of notifications to be disabled. -# -# Specify a list of notifications that should not be emitted. -# A notification can be given either as a notification type to -# disable a single event notification, or as a notification group -# prefix to disable all event notifications within a group. -# -# Possible values: -# A comma-separated list of individual notification types or -# notification groups to be disabled. Currently supported groups: -# * image -# * image.member -# * task -# * metadef_namespace -# * metadef_object -# * metadef_property -# * metadef_resource_type -# * metadef_tag -# For a complete listing and description of each event refer to: -# http://docs.openstack.org/developer/glance/notifications.html -# -# The values must be specified as: . -# For example: image.create,task.success,metadef_tag -# -# Related options: -# * None -# -# (list value) -#disabled_notifications = - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# DEPRECATED: If set to false, the logging level will be set to WARNING instead -# of the default INFO level. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#verbose = true - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. (string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log_file option is specified and Linux -# platform is used. This option is ignored if log_config_append is set. 
(boolean -# value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append is -# set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message is -# DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is ignored -# if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or -# empty string. Logs with level greater or equal to rate_limit_except_level are -# not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - - -[cors] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. Format: "://[:]", no trailing -# slash. Example: https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. 
(list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the actual request. (list -# value) -#allow_headers = - - -[cors.subdomain] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. Format: "://[:]", no trailing -# slash. Example: https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the actual request. (list -# value) -#allow_headers = - - -[database] - -# -# From oslo.db -# - -# DEPRECATED: The file name to use with SQLite. (string value) -# Deprecated group/name - [DEFAULT]/sqlite_db -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Should use config option connection or slave_connection to connect the -# database. -#sqlite_db = oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -# Deprecated group/name - [DEFAULT]/sqlite_synchronous -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. (string -# value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave database. -# (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including the -# default, overrides any server-set SQL mode. To use whatever SQL mode is set by -# the server configuration, set this to no value. Example: mysql_sql_mode= -# (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a value of 0 -# indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. 
(integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer -# value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. (boolean -# value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. Set to -1 to specify an infinite retry count. (integer value) -#db_max_retries = 20 - -# -# From oslo.db.concurrency -# - -# Enable the experimental use of thread pooling for all DB API calls (boolean -# value) -# Deprecated group/name - [DEFAULT]/dbapi_use_tpool -#use_tpool = false - - -[glance_store] - -# -# From glance.store -# - -# -# List of enabled Glance stores. -# -# Register the storage backends to use for storing disk images -# as a comma separated list. The default stores enabled for -# storing disk images with Glance are ``file`` and ``http``. -# -# Possible values: -# * A comma separated list that could include: -# * file -# * http -# * swift -# * rbd -# * sheepdog -# * cinder -# * vmware -# -# Related Options: -# * default_store -# -# (list value) -#stores = file,http - -# -# The default scheme to use for storing images. -# -# Provide a string value representing the default scheme to use for -# storing images. If not set, Glance uses ``file`` as the default -# scheme to store images with the ``file`` store. -# -# NOTE: The value given for this configuration option must be a valid -# scheme for a store registered with the ``stores`` configuration -# option. -# -# Possible values: -# * file -# * filesystem -# * http -# * https -# * swift -# * swift+http -# * swift+https -# * swift+config -# * rbd -# * sheepdog -# * cinder -# * vsphere -# -# Related Options: -# * stores -# -# (string value) -# Allowed values: file, filesystem, http, https, swift, swift+http, swift+https, swift+config, rbd, sheepdog, cinder, vsphere -#default_store = file - -# -# Minimum interval in seconds to execute updating dynamic storage -# capabilities based on current backend status. -# -# Provide an integer value representing time in seconds to set the -# minimum interval before an update of dynamic storage capabilities -# for a storage backend can be attempted. Setting -# ``store_capabilities_update_min_interval`` does not mean updates -# occur periodically based on the set interval. 
Rather, the update -# is performed when this interval has elapsed, if an operation -# on the store is triggered. -# -# By default, this option is set to zero and is disabled. Provide an -# integer value greater than zero to enable this option. -# -# NOTE: For more information on store capabilities and their updates, -# please visit: https://specs.openstack.org/openstack/glance-specs/specs/kilo -# /store-capabilities.html -# -# For more information on setting up a particular store in your -# deployment and help with the usage of this feature, please contact -# the storage driver maintainers listed here: -# http://docs.openstack.org/developer/glance_store/drivers/index.html -# -# Possible values: -# * Zero -# * Positive integer -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#store_capabilities_update_min_interval = 0 - -# -# Information to match when looking for cinder in the service catalog. -# -# When the ``cinder_endpoint_template`` is not set and any of -# ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, ``cinder_store_password`` is not set, -# cinder store uses this information to look up the cinder endpoint from the service -# catalog in the current context. ``cinder_os_region_name``, if set, is taken -# into consideration to fetch the appropriate endpoint. -# -# The service catalog can be listed by the ``openstack catalog list`` command. -# -# Possible values: -# * A string of the following form: -# ``<service_type>:<service_name>:<interface>`` -# At least ``service_type`` and ``interface`` should be specified. -# ``service_name`` can be omitted. -# -# Related options: -# * cinder_os_region_name -# * cinder_endpoint_template -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# -# (string value) -#cinder_catalog_info = volumev2::publicURL - -# -# Override service catalog lookup with template for cinder endpoint. -# -# When this option is set, this value is used to generate the cinder endpoint, -# instead of looking it up from the service catalog. -# This value is ignored if ``cinder_store_auth_address``, -# ``cinder_store_user_name``, ``cinder_store_project_name``, and -# ``cinder_store_password`` are specified. -# -# If this configuration option is set, ``cinder_catalog_info`` will be ignored. -# -# Possible values: -# * URL template string for cinder endpoint, where ``%%(tenant)s`` is -# replaced with the current tenant (project) name. -# For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# * cinder_catalog_info -# -# (string value) -#cinder_endpoint_template = - -# -# Region name to look up the cinder service from the service catalog. -# -# This is used only when ``cinder_catalog_info`` is used for determining the -# endpoint. If set, the lookup for cinder endpoint by this node is filtered to -# the specified region. It is useful when multiple regions are listed in the -# catalog. If this is not set, the endpoint is looked up from every region. -# -# Possible values: -# * A string that is a valid region name. -# -# Related options: -# * cinder_catalog_info -# -# (string value) -# Deprecated group/name - [glance_store]/os_region_name -#cinder_os_region_name = - -# -# Location of a CA certificates file used for cinder client requests. 
-# -# The specified CA certificates file, if set, is used to verify cinder -# connections via HTTPS endpoint. If the endpoint is HTTP, this value is -# ignored. -# ``cinder_api_insecure`` must be set to ``True`` to enable the verification. -# -# Possible values: -# * Path to a ca certificates file -# -# Related options: -# * cinder_api_insecure -# -# (string value) -#cinder_ca_certificates_file = - -# -# Number of cinderclient retries on failed HTTP calls. -# -# When a call fails, cinderclient will retry the call up to the -# specified number of times after sleeping a few seconds. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_http_retries = 3 - -# -# Time period, in seconds, to wait for a cinder volume transition to -# complete. -# -# When the cinder volume is created, deleted, or attached to the glance node to -# read/write the volume data, the volume's state is changed. For example, the -# newly created volume status changes from ``creating`` to ``available`` after -# the creation process is completed. This specifies the maximum time to wait for -# the status change. If a timeout occurs while waiting, or the status is changed -# to an unexpected value (e.g. ``error``), the image creation fails. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_state_transition_timeout = 300 - -# -# Allow performing insecure SSL requests to cinder. -# -# If this option is set to True, HTTPS endpoint connection is verified using the -# CA certificates file specified by ``cinder_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * cinder_ca_certificates_file -# -# (boolean value) -#cinder_api_insecure = false - -# -# The address where the cinder authentication service is listening. -# -# When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, and ``cinder_store_password`` options are -# specified, the specified values are always used for the authentication. -# This is useful to hide the image volumes from users by storing them in a -# project/tenant specific to the image service. It also enables users to share -# the image volume among other projects under the control of glance's ACL. -# -# If either of these options are not set, the cinder endpoint is looked up -# from the service catalog, and current context's user and project are used. -# -# Possible values: -# * A valid authentication service address, for example: -# ``http://openstack.example.org/identity/v2.0`` -# -# Related options: -# * cinder_store_user_name -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_auth_address = - -# -# User name to authenticate against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid user name -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_user_name = - -# -# Password for the user authenticating against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. 
-# -# Possible values: -# * A valid password for the user specified by ``cinder_store_user_name`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# -# (string value) -#cinder_store_password = - -# -# Project name where the image volume is stored in cinder. -# -# If this configuration option is not set, the project in current context is -# used. -# -# This must be used with all the following related options. If any of these are -# not specified, the project of the current context is used. -# -# Possible values: -# * A valid project name -# -# Related options: -# * ``cinder_store_auth_address`` -# * ``cinder_store_user_name`` -# * ``cinder_store_password`` -# -# (string value) -#cinder_store_project_name = - -# -# Path to the rootwrap configuration file to use for running commands as root. -# -# The cinder store requires root privileges to operate the image volumes (for -# connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). -# The configuration file should allow the required commands by cinder store and -# os-brick library. -# -# Possible values: -# * Path to the rootwrap config file -# -# Related options: -# * None -# -# (string value) -#rootwrap_config = /etc/glance/rootwrap.conf - -# -# Volume type that will be used for volume creation in cinder. -# -# Some cinder backends can have several volume types to optimize storage usage. -# Adding this option allows an operator to choose a specific volume type -# in cinder that can be optimized for images. -# -# If this is not set, then the default volume type specified in the cinder -# configuration will be used for volume creation. -# -# Possible values: -# * A valid volume type from cinder -# -# Related options: -# * None -# -# (string value) -#cinder_volume_type = - -# -# Directory to which the filesystem backend store writes images. -# -# Upon start up, Glance creates the directory if it doesn't already -# exist and verifies write access to the user under which -# ``glance-api`` runs. If the write access isn't available, a -# ``BadStoreConfiguration`` exception is raised and the filesystem -# store may not be available for adding new images. -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * A valid path to a directory -# -# Related options: -# * ``filesystem_store_datadirs`` -# * ``filesystem_store_file_perm`` -# -# (string value) -#filesystem_store_datadir = /var/lib/glance/images - -# -# List of directories and their priorities to which the filesystem -# backend store writes images. -# -# The filesystem store can be configured to store images in multiple -# directories as opposed to using a single directory specified by the -# ``filesystem_store_datadir`` configuration option. When using -# multiple directories, each directory can be given an optional -# priority to specify the preference order in which they should -# be used. Priority is an integer that is concatenated to the -# directory path with a colon where a higher value indicates higher -# priority. When two directories have the same priority, the directory -# with most free space is used. When no priority is specified, it -# defaults to zero. 
-# -# More information on configuring filesystem store with multiple store -# directories can be found at -# http://docs.openstack.org/developer/glance/configuring.html -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * List of strings of the following form: -# * ``<directory path>:<priority>`` -# -# Related options: -# * ``filesystem_store_datadir`` -# * ``filesystem_store_file_perm`` -# -# (multi valued) -#filesystem_store_datadirs = - -# -# Filesystem store metadata file. -# -# The path to a file which contains the metadata to be returned with -# any location associated with the filesystem store. The file must -# contain a valid JSON object. The object should contain the keys -# ``id`` and ``mountpoint``. The value for both keys should be a -# string. -# -# Possible values: -# * A valid path to the store metadata file -# -# Related options: -# * None -# -# (string value) -#filesystem_store_metadata_file = - -# -# File access permissions for the image files. -# -# Set the intended file access permissions for image data. This provides -# a way to enable other services, e.g. Nova, to consume images directly -# from the filesystem store. The users running the services that are -# intended to be given access can be made members of the group -# that owns the files created. Assigning a value less than or equal to -# zero for this configuration option signifies that no changes will be made -# to the default permissions. This value will be decoded as an octal -# digit. -# -# For more information, please refer to the documentation at -# http://docs.openstack.org/developer/glance/configuring.html -# -# Possible values: -# * A valid file access permission -# * Zero -# * Any negative integer -# -# Related options: -# * None -# -# (integer value) -#filesystem_store_file_perm = 0 - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Certificate Authority file to verify the remote server certificate. If -# this option is set, the ``https_insecure`` option will be ignored and -# the CA file specified will be used to authenticate the server -# certificate and establish a secure connection to the server. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * https_insecure -# -# (string value) -#https_ca_certificates_file = - -# -# Set verification of the remote server certificate. -# -# This configuration option takes in a boolean value to determine -# whether or not to verify the remote server certificate. If set to -# True, the remote server certificate is not verified. If the option is -# set to False, then the default CA truststore is used for verification. -# -# This option is ignored if ``https_ca_certificates_file`` is set. -# The remote server certificate will then be verified using the file -# specified by the ``https_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * https_ca_certificates_file -# -# (boolean value) -#https_insecure = true - -# -# The http/https proxy information to be used to connect to the remote -# server. 
-# -# This configuration option specifies the http/https proxy information -# that should be used to connect to the remote server. The proxy -# information should be a key value pair of the scheme and proxy, for -# example, http:10.0.0.1:3128. You can also specify proxies for multiple -# schemes by separating the key value pairs with a comma, for example, -# http:10.0.0.1:3128, https:10.0.0.1:1080. -# -# Possible values: -# * A comma separated list of scheme:proxy pairs as described above -# -# Related options: -# * None -# -# (dict value) -#http_proxy_information = - -# -# Size, in megabytes, to chunk RADOS images into. -# -# Provide an integer value representing the size in megabytes to chunk -# Glance images into. The default chunk size is 8 megabytes. For optimal -# performance, the value should be a power of two. -# -# When Ceph's RBD object storage system is used as the storage backend -# for storing Glance images, the images are chunked into objects of the -# size set using this option. These chunked objects are then stored -# across the distributed block data store to use for Glance. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#rbd_store_chunk_size = 8 - -# -# RADOS pool in which images are stored. -# -# When RBD is used as the storage backend for storing Glance images, the -# images are stored by means of logical grouping of the objects (chunks -# of images) into a ``pool``. Each pool is defined with the number of -# placement groups it can contain. The default pool that is used is -# 'images'. -# -# More information on the RBD storage backend can be found here: -# http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ -# -# Possible Values: -# * A valid pool name -# -# Related options: -# * None -# -# (string value) -#rbd_store_pool = images - -# -# RADOS user to authenticate as. -# -# This configuration option takes in the RADOS user to authenticate as. -# This is only needed when RADOS authentication is enabled and is -# applicable only if the user is using Cephx authentication. If the -# value for this option is not set by the user or is set to None, a -# default value will be chosen, which will be based on the client. -# section in rbd_store_ceph_conf. -# -# Possible Values: -# * A valid RADOS user -# -# Related options: -# * rbd_store_ceph_conf -# -# (string value) -#rbd_store_user = - -# -# Ceph configuration file path. -# -# This configuration option takes in the path to the Ceph configuration -# file to be used. If the value for this option is not set by the user -# or is set to None, librados will locate the default configuration file -# which is located at /etc/ceph/ceph.conf. If using Cephx -# authentication, this file should include a reference to the right -# keyring in a client. section -# -# Possible Values: -# * A valid path to a configuration file -# -# Related options: -# * rbd_store_user -# -# (string value) -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# -# Timeout value for connecting to Ceph cluster. -# -# This configuration option takes in the timeout value in seconds used -# when connecting to the Ceph cluster i.e. it sets the time to wait for -# glance-api before closing the connection. This prevents glance-api -# hangups during the connection to RBD. If the value for this option -# is set to less than or equal to 0, no timeout is set and the default -# librados value is used. 
-# -# Possible Values: -# * Any integer value -# -# Related options: -# * None -# -# (integer value) -#rados_connect_timeout = 0 - -# -# Chunk size for images to be stored in Sheepdog data store. -# -# Provide an integer value representing the size in mebibyte -# (1048576 bytes) to chunk Glance images into. The default -# chunk size is 64 mebibytes. -# -# When using Sheepdog distributed storage system, the images are -# chunked into objects of this size and then stored across the -# distributed data store to use for Glance. -# -# Chunk sizes, if a power of two, help avoid fragmentation and -# enable improved performance. -# -# Possible values: -# * Positive integer value representing size in mebibytes. -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 1 -#sheepdog_store_chunk_size = 64 - -# -# Port number on which the sheep daemon will listen. -# -# Provide an integer value representing a valid port number on -# which you want the Sheepdog daemon to listen on. The default -# port is 7000. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages it receives on -# the port number set using ``sheepdog_store_port`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related Options: -# * sheepdog_store_address -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#sheepdog_store_port = 7000 - -# -# Address to bind the Sheepdog daemon to. -# -# Provide a string value representing the address to bind the -# Sheepdog daemon to. The default address set for the 'sheep' -# is 127.0.0.1. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages directed to the -# address set using ``sheepdog_store_address`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# * A valid hostname -# -# Related Options: -# * sheepdog_store_port -# -# (string value) -#sheepdog_store_address = 127.0.0.1 - -# -# Set verification of the server certificate. -# -# This boolean determines whether or not to verify the server -# certificate. If this option is set to True, swiftclient won't check -# for a valid SSL certificate when authenticating. If the option is set -# to False, then the default CA truststore is used for verification. -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_cacert -# -# (boolean value) -#swift_store_auth_insecure = false - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to specify the path to -# a custom Certificate Authority file for SSL verification when -# connecting to Swift. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * swift_store_auth_insecure -# -# (string value) -#swift_store_cacert = /etc/ssl/certs/ca-certificates.crt - -# -# The region of Swift endpoint to use by Glance. -# -# Provide a string value representing a Swift region where Glance -# can connect to for image storage. By default, there is no region -# set. 
-# -# When Glance uses Swift as the storage backend to store images -# for a specific tenant that has multiple endpoints, setting of a -# Swift region with ``swift_store_region`` allows Glance to connect -# to Swift in the specified region as opposed to a single region -# connectivity. -# -# This option can be configured for both single-tenant and -# multi-tenant storage. -# -# NOTE: Setting the region with ``swift_store_region`` is -# tenant-specific and is necessary ``only if`` the tenant has -# multiple endpoints across different regions. -# -# Possible values: -# * A string value representing a valid Swift region. -# -# Related Options: -# * None -# -# (string value) -#swift_store_region = RegionTwo - -# -# The URL endpoint to use for Swift backend storage. -# -# Provide a string value representing the URL endpoint to use for -# storing Glance images in Swift store. By default, an endpoint -# is not set and the storage URL returned by ``auth`` is used. -# Setting an endpoint with ``swift_store_endpoint`` overrides the -# storage URL and is used for Glance image storage. -# -# NOTE: The URL should include the path up to, but excluding the -# container. The location of an object is obtained by appending -# the container and object to the configured URL. -# -# Possible values: -# * String value representing a valid URL path up to a Swift container -# -# Related Options: -# * None -# -# (string value) -#swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name - -# -# Endpoint Type of Swift service. -# -# This string value indicates the endpoint type to use to fetch the -# Swift endpoint. The endpoint type determines the actions the user will -# be allowed to perform, for instance, reading and writing to the Store. -# This setting is only used if swift_store_auth_version is greater than -# 1. -# -# Possible values: -# * publicURL -# * adminURL -# * internalURL -# -# Related options: -# * swift_store_endpoint -# -# (string value) -# Allowed values: publicURL, adminURL, internalURL -#swift_store_endpoint_type = publicURL - -# -# Type of Swift service to use. -# -# Provide a string value representing the service type to use for -# storing images while using Swift backend storage. The default -# service type is set to ``object-store``. -# -# NOTE: If ``swift_store_auth_version`` is set to 2, the value for -# this configuration option needs to be ``object-store``. If using -# a higher version of Keystone or a different auth scheme, this -# option may be modified. -# -# Possible values: -# * A string representing a valid service type for Swift storage. -# -# Related Options: -# * None -# -# (string value) -#swift_store_service_type = object-store - -# -# Name of single container to store images/name prefix for multiple containers -# -# When a single container is being used to store images, this configuration -# option indicates the container within the Glance account to be used for -# storing all images. When multiple containers are used to store images, this -# will be the name prefix for all containers. Usage of single/multiple -# containers can be controlled using the configuration option -# ``swift_store_multiple_containers_seed``. -# -# When using multiple containers, the containers will be named after the value -# set for this configuration option with the first N chars of the image UUID -# as the suffix delimited by an underscore (where N is specified by -# ``swift_store_multiple_containers_seed``). 
-# -# Example: if the seed is set to 3 and swift_store_container = ``glance``, then -# an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in -# the container ``glance_fda``. All dashes in the UUID are included when -# creating the container name but do not count toward the character limit, so -# when N=10 the container name would be ``glance_fdae39a1-ba.`` -# -# Possible values: -# * If using single container, this configuration option can be any string -# that is a valid swift container name in Glance's Swift account -# * If using multiple containers, this configuration option can be any -# string as long as it satisfies the container naming rules enforced by -# Swift. The value of ``swift_store_multiple_containers_seed`` should be -# taken into account as well. -# -# Related options: -# * ``swift_store_multiple_containers_seed`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (string value) -#swift_store_container = glance - -# -# The size threshold, in MB, after which Glance will start segmenting image -# data. -# -# Swift has an upper limit on the size of a single uploaded object. By default, -# this is 5GB. To upload objects bigger than this limit, objects are segmented -# into multiple smaller objects that are tied together with a manifest file. -# For more detail, refer to -# http://docs.openstack.org/developer/swift/overview_large_objects.html -# -# This configuration option specifies the size threshold over which the Swift -# driver will start segmenting image data into multiple smaller files. -# Currently, the Swift driver only supports creating Dynamic Large Objects. -# -# NOTE: This should be set by taking into account the large object limit -# enforced by the Swift cluster in consideration. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by the Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_chunk_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_size = 5120 - -# -# The maximum size, in MB, of the segments when image data is segmented. -# -# When image data is segmented to upload images that are larger than the limit -# enforced by the Swift cluster, image data is broken into segments that are no -# bigger than the size specified by this configuration option. -# Refer to ``swift_store_large_object_size`` for more detail. -# -# For example: if ``swift_store_large_object_size`` is 5GB and -# ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be -# segmented into 7 segments where the first six segments will be 1GB in size and -# the seventh segment will be 0.2GB. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_chunk_size = 200 - -# -# Create container, if it doesn't already exist, when uploading image. -# -# At the time of uploading an image, if the corresponding container doesn't -# exist, it will be created provided this configuration option is set to True. -# By default, it won't be created. This behavior is applicable for both single -# and multiple containers mode. 
-
-#
-# The size threshold, in MB, after which Glance will start segmenting image
-# data.
-#
-# Swift has an upper limit on the size of a single uploaded object. By default,
-# this is 5GB. To upload objects bigger than this limit, objects are segmented
-# into multiple smaller objects that are tied together with a manifest file.
-# For more detail, refer to
-# http://docs.openstack.org/developer/swift/overview_large_objects.html
-#
-# This configuration option specifies the size threshold over which the Swift
-# driver will start segmenting image data into multiple smaller files.
-# Currently, the Swift driver only supports creating Dynamic Large Objects.
-#
-# NOTE: This should be set by taking into account the large object limit
-# enforced by the Swift cluster under consideration.
-#
-# Possible values:
-# * A positive integer that is less than or equal to the large object limit
-# enforced by the Swift cluster under consideration.
-#
-# Related options:
-# * ``swift_store_large_object_chunk_size``
-#
-# (integer value)
-# Minimum value: 1
-#swift_store_large_object_size = 5120
-
-#
-# The maximum size, in MB, of the segments when image data is segmented.
-#
-# When image data is segmented to upload images that are larger than the limit
-# enforced by the Swift cluster, image data is broken into segments that are no
-# bigger than the size specified by this configuration option.
-# Refer to ``swift_store_large_object_size`` for more detail.
-#
-# For example: if ``swift_store_large_object_size`` is 5GB and
-# ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be
-# segmented into 7 segments where the first six segments will be 1GB in size and
-# the seventh segment will be 0.2GB.
-#
-# Possible values:
-# * A positive integer that is less than or equal to the large object limit
-# enforced by the Swift cluster under consideration.
-#
-# Related options:
-# * ``swift_store_large_object_size``
-#
-# (integer value)
-# Minimum value: 1
-#swift_store_large_object_chunk_size = 200
-
-#
-# Create container, if it doesn't already exist, when uploading image.
-#
-# At the time of uploading an image, if the corresponding container doesn't
-# exist, it will be created provided this configuration option is set to True.
-# By default, it won't be created. This behavior applies to both single and
-# multiple container modes.
-#
-# Possible values:
-# * True
-# * False
-#
-# Related options:
-# * None
-#
-# (boolean value)
-#swift_store_create_container_on_put = false
-
-#
-# Store images in tenant's Swift account.
-#
-# This enables multi-tenant storage mode which causes Glance images to be stored
-# in tenant-specific Swift accounts. If this is disabled, Glance stores all
-# images in its own account. More details about the multi-tenant store can be
-# found at
-# https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage
-#
-# NOTE: If using multi-tenant swift store, please make sure
-# that you do not set a swift configuration file with the
-# 'swift_store_config_file' option.
-#
-# Possible values:
-# * True
-# * False
-#
-# Related options:
-# * swift_store_config_file
-#
-# (boolean value)
-#swift_store_multi_tenant = false
-
-#
-# Seed indicating the number of containers to use for storing images.
-#
-# When using a single-tenant store, images can be stored in one or more
-# containers. When set to 0, all images will be stored in one single container.
-# When set to an integer value between 1 and 32, multiple containers will be
-# used to store images. This configuration option will determine how many
-# containers are created. The total number of containers that will be used is
-# equal to 16^N, so if this config option is set to 2, then 16^2=256 containers
-# will be used to store images.
-#
-# Please refer to ``swift_store_container`` for more detail on the naming
-# convention. More detail about using multiple containers can be found at
-# https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store-
-# multiple-containers.html
-#
-# NOTE: This is used only when swift_store_multi_tenant is disabled.
-#
-# Possible values:
-# * A non-negative integer less than or equal to 32
-#
-# Related options:
-# * ``swift_store_container``
-# * ``swift_store_multi_tenant``
-# * ``swift_store_create_container_on_put``
-#
-# (integer value)
-# Minimum value: 0
-# Maximum value: 32
-#swift_store_multiple_containers_seed = 0
-
-#
-# List of tenants that will be granted admin access.
-#
-# This is a list of tenants that will be granted read/write access on
-# all Swift containers created by Glance in multi-tenant mode. The
-# default value is an empty list.
-#
-# Possible values:
-# * A comma-separated list of strings representing UUIDs of Keystone
-# projects/tenants
-#
-# Related options:
-# * None
-#
-# (list value)
-#swift_store_admin_tenants =
-
-#
-# SSL layer compression for HTTPS Swift requests.
-#
-# Provide a boolean value to determine whether or not to compress
-# HTTPS Swift requests for images at the SSL layer. By default,
-# compression is enabled.
-#
-# When using Swift as the backend store for Glance image storage,
-# SSL layer compression of HTTPS Swift requests can be set using
-# this option. If set to False, SSL layer compression of HTTPS
-# Swift requests is disabled. Disabling this option may improve
-# performance for images which are already in a compressed format,
-# for example, qcow2.
-#
-# Possible values:
-# * True
-# * False
-#
-# Related Options:
-# * None
-#
-# (boolean value)
-#swift_store_ssl_compression = true
-
-#
-# The number of times a Swift download will be retried before the
-# request fails.
-#
-# Provide an integer value representing the number of times an image
-# download must be retried before erroring out. The default value is
-# zero (no retry on a failed image download). When set to a positive
-# integer value, ``swift_store_retry_get_count`` ensures that the
-# download is attempted this many more times upon a download failure
-# before sending an error message.
-#
-# Possible values:
-# * Zero
-# * Positive integer value
-#
-# Related Options:
-# * None
-#
-# (integer value)
-# Minimum value: 0
-#swift_store_retry_get_count = 0
-
-#
-# Time in seconds defining the size of the window in which a new
-# token may be requested before the current token is due to expire.
-#
-# Typically, the Swift storage driver fetches a new token upon the
-# expiration of the current token to ensure continued access to
-# Swift. However, some Swift transactions (like uploading image
-# segments) may not recover well if the token expires on the fly.
-#
-# Hence, by fetching a new token before the current token expiration,
-# we make sure that the token is neither expired nor close to expiry
-# when a transaction is attempted. By default, the Swift storage
-# driver requests a new token 60 seconds or less before the
-# current token expiration.
-#
-# Possible values:
-# * Zero
-# * Positive integer value
-#
-# Related Options:
-# * None
-#
-# (integer value)
-# Minimum value: 0
-#swift_store_expire_soon_interval = 60
-
-#
-# Use trusts for multi-tenant Swift store.
-#
-# This option instructs the Swift store to create a trust for each
-# add/get request when the multi-tenant store is in use. Using trusts
-# allows the Swift store to avoid problems that can be caused by an
-# authentication token expiring during the upload or download of data.
-#
-# By default, ``swift_store_use_trusts`` is set to ``True`` (use of
-# trusts is enabled). If set to ``False``, a user token is used for
-# the Swift connection instead, eliminating the overhead of trust
-# creation.
-#
-# NOTE: This option is considered only when
-# ``swift_store_multi_tenant`` is set to ``True``.
-#
-# Possible values:
-# * True
-# * False
-#
-# Related options:
-# * swift_store_multi_tenant
-#
-# (boolean value)
-#swift_store_use_trusts = true
-
-#
-# Reference to default Swift account/backing store parameters.
-#
-# Provide a string value representing a reference to the default set
-# of parameters required for using a Swift account/backing store for
-# image storage. The default reference value for this configuration
-# option is 'ref1'. This configuration option dereferences the
-# parameters and facilitates image storage in the Swift storage backend
-# every time a new image is added.
-#
-# Possible values:
-# * A valid string value
-#
-# Related options:
-# * None
-#
-# (string value)
-#default_swift_reference = ref1
-
-# DEPRECATED: Version of the authentication service to use. Valid versions are 2
-# and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-# Reason:
-# The option 'auth_version' in the Swift back-end configuration file is
-# used instead.
-#swift_store_auth_version = 2
-
-# DEPRECATED: The address where the Swift authentication service is listening.
-# (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-# Reason:
-# The option 'auth_address' in the Swift back-end configuration file is
-# used instead.
-#swift_store_auth_address =
-
-# DEPRECATED: The user to authenticate against the Swift authentication service.
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-# Reason: -# The option 'user' in the Swift back-end configuration file is set instead. -#swift_store_user = - -# DEPRECATED: Auth key for the user authenticating against the Swift -# authentication service. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'key' in the Swift back-end configuration file is used -# to set the authentication key instead. -#swift_store_key = - -# -# Absolute path to the file containing the swift account(s) -# configurations. -# -# Include a string value representing the path to a configuration -# file that has references for each of the configured Swift -# account(s)/backing stores. By default, no file path is specified -# and customized Swift referencing is disabled. Configuring this -# option is highly recommended while using Swift storage backend for -# image storage as it avoids storage of credentials in the database. -# -# NOTE: Please do not configure this option if you have set -# ``swift_store_multi_tenant`` to ``True``. -# -# Possible values: -# * String value representing an absolute path on the glance-api -# node -# -# Related options: -# * swift_store_multi_tenant -# -# (string value) -#swift_store_config_file = - -# -# Address of the ESX/ESXi or vCenter Server target system. -# -# This configuration option sets the address of the ESX/ESXi or vCenter -# Server target system. This option is required when using the VMware -# storage backend. The address can contain an IP address (127.0.0.1) or -# a DNS name (www.my-domain.com). -# -# Possible Values: -# * A valid IPv4 or IPv6 address -# * A valid DNS name -# -# Related options: -# * vmware_server_username -# * vmware_server_password -# -# (string value) -#vmware_server_host = 127.0.0.1 - -# -# Server username. -# -# This configuration option takes the username for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is the username for a user with appropriate -# privileges -# -# Related options: -# * vmware_server_host -# * vmware_server_password -# -# (string value) -#vmware_server_username = root - -# -# Server password. -# -# This configuration option takes the password for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is a password corresponding to the username -# specified using the "vmware_server_username" option -# -# Related options: -# * vmware_server_host -# * vmware_server_username -# -# (string value) -#vmware_server_password = vmware - -# -# The number of VMware API retries. -# -# This configuration option specifies the number of times the VMware -# ESX/VC server API must be retried upon connection related issues or -# server API call overload. It is not possible to specify 'retry -# forever'. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_api_retry_count = 10 - -# -# Interval in seconds used for polling remote tasks invoked on VMware -# ESX/VC server. -# -# This configuration option takes in the sleep time in seconds for polling an -# on-going async task as part of the VMWare ESX/VC server API call. 
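To see how ``vmware_api_retry_count`` and ``vmware_task_poll_interval`` cooperate, here is a rough sketch of a poll-with-retries loop, using hypothetical helper names (this is not the actual VMware driver code):

    import time

    def wait_for_task(fetch_state, poll_interval=5, retry_count=10):
        # Poll an ongoing async ESX/VC task, sleeping `poll_interval`
        # seconds between polls and tolerating up to `retry_count`
        # connection failures; note there is no "retry forever" mode.
        failures = 0
        while True:
            try:
                state = fetch_state()
            except ConnectionError:
                failures += 1
                if failures > retry_count:
                    raise
            else:
                if state in ('success', 'error'):
                    return state
            time.sleep(poll_interval)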
-#
-# Possible Values:
-# * Any positive integer value
-#
-# Related options:
-# * None
-#
-# (integer value)
-# Minimum value: 1
-#vmware_task_poll_interval = 5
-
-#
-# The directory where the glance images will be stored in the datastore.
-#
-# This configuration option specifies the path to the directory where the
-# glance images will be stored in the VMware datastore. If this option
-# is not set, the default directory where the glance images are stored
-# is openstack_glance.
-#
-# Possible Values:
-# * Any string that is a valid path to a directory
-#
-# Related options:
-# * None
-#
-# (string value)
-#vmware_store_image_dir = /openstack_glance
-
-#
-# Set verification of the ESX/vCenter server certificate.
-#
-# This configuration option takes a boolean value to determine
-# whether or not to verify the ESX/vCenter server certificate. If this
-# option is set to True, the ESX/vCenter server certificate is not
-# verified. If this option is set to False, then the default CA
-# truststore is used for verification.
-#
-# This option is ignored if the "vmware_ca_file" option is set. In that
-# case, the ESX/vCenter server certificate will then be verified using
-# the file specified using the "vmware_ca_file" option.
-#
-# Possible Values:
-# * True
-# * False
-#
-# Related options:
-# * vmware_ca_file
-#
-# (boolean value)
-# Deprecated group/name - [glance_store]/vmware_api_insecure
-#vmware_insecure = false
-
-#
-# Absolute path to the CA bundle file.
-#
-# This configuration option enables the operator to use a custom
-# Certificate Authority file to verify the ESX/vCenter certificate.
-#
-# If this option is set, the "vmware_insecure" option will be ignored
-# and the CA file specified will be used to authenticate the ESX/vCenter
-# server certificate and establish a secure connection to the server.
-#
-# Possible Values:
-# * Any string that is a valid absolute path to a CA file
-#
-# Related options:
-# * vmware_insecure
-#
-# (string value)
-#vmware_ca_file = /etc/ssl/certs/ca-certificates.crt
-
-#
-# The datastores where the image can be stored.
-#
-# This configuration option specifies the datastores where the image can
-# be stored in the VMWare store backend. This option may be specified
-# multiple times for specifying multiple datastores. The datastore name
-# should be specified after its datacenter path, separated by ":". An
-# optional weight may be given after the datastore name, separated again
-# by ":" to specify the priority. Thus, the required format becomes
-# <datacenter_path>:<datastore_name>:<optional_weight>.
-#
-# When adding an image, the datastore with the highest weight will be
-# selected, unless there is not enough free space available in cases
-# where the image size is already known. If no weight is given, it is
-# assumed to be zero and the directory will be considered for selection
-# last. If multiple datastores have the same weight, then the one with
-# the most free space available is selected.
-#
-# Possible Values:
-# * Any string of the format:
-# <datacenter_path>:<datastore_name>:<optional_weight>
-#
-# Related options:
-# * None
-#
-# (multi valued)
-#vmware_datastores =
-
-
-[keystone_authtoken]
-
-#
-# From keystonemiddleware.auth_token
-#
-
-# Complete "public" Identity API endpoint. This endpoint should not be an
-# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
-# clients are redirected to this endpoint to authenticate. Although this
-# endpoint should ideally be unversioned, client support in the wild varies.
-# If you're using a versioned v2 endpoint here, then this should *not* be the
-# same endpoint the service user utilizes for validating tokens, because normal
-# end users may not be able to reach that endpoint. (string value)
-#auth_uri =
-
-# API version of the admin Identity API endpoint. (string value)
-#auth_version =
-
-# Do not handle authorization requests within the middleware, but delegate the
-# authorization decision to downstream WSGI components. (boolean value)
-#delay_auth_decision = false
-
-# Request timeout value for communicating with Identity API server. (integer
-# value)
-#http_connect_timeout =
-
-# The number of times to retry when communicating with the Identity API server.
-# (integer value)
-#http_request_max_retries = 3
-
-# Request environment key where the Swift cache object is stored. When
-# auth_token middleware is deployed with a Swift cache, use this option to have
-# the middleware share a caching backend with Swift. Otherwise, use the
-# ``memcached_servers`` option instead. (string value)
-#cache =
-
-# Required if identity server requires client certificate (string value)
-#certfile =
-
-# Required if identity server requires client certificate (string value)
-#keyfile =
-
-# A PEM encoded Certificate Authority to use when verifying HTTPS connections.
-# Defaults to system CAs. (string value)
-#cafile =
-
-# Verify HTTPS connections. (boolean value)
-#insecure = false
-
-# The region in which the identity server can be found. (string value)
-#region_name =
-
-# DEPRECATED: Directory used to cache files related to PKI tokens. This option
-# has been deprecated in the Ocata release and will be removed in the P release.
-# (string value)
-# This option is deprecated for removal since Ocata.
-# Its value may be silently ignored in the future.
-# Reason: PKI token format is no longer supported.
-#signing_dir =
-
-# Optionally specify a list of memcached server(s) to use for caching. If left
-# undefined, tokens will instead be cached in-process. (list value)
-# Deprecated group/name - [keystone_authtoken]/memcache_servers
-#memcached_servers =
-
-# In order to prevent excessive effort spent validating tokens, the middleware
-# caches previously-seen tokens for a configurable duration (in seconds). Set to
-# -1 to disable caching completely. (integer value)
-#token_cache_time = 300
-
-# DEPRECATED: Determines the frequency at which the list of revoked tokens is
-# retrieved from the Identity service (in seconds). A high number of revocation
-# events combined with a low cache duration may significantly reduce
-# performance. Only valid for PKI tokens. This option has been deprecated in the
-# Ocata release and will be removed in the P release. (integer value)
-# This option is deprecated for removal since Ocata.
-# Its value may be silently ignored in the future.
-# Reason: PKI token format is no longer supported.
-#revocation_cache_time = 10
-
-# (Optional) If defined, indicate whether token data should be authenticated or
-# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
-# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
-# cache. If the value is not one of these options or empty, auth_token will
-# raise an exception on initialization. (string value)
-# Allowed values: None, MAC, ENCRYPT
-#memcache_security_strategy = None
-
-# (Optional, mandatory if memcache_security_strategy is defined) This string is
-# used for key derivation. (string value)
-#memcache_secret_key =
-
-# (Optional) Number of seconds memcached server is considered dead before it is
-# tried again. (integer value)
-#memcache_pool_dead_retry = 300
-
-# (Optional) Maximum total number of open connections to every memcached server.
-# (integer value)
-#memcache_pool_maxsize = 10
-
-# (Optional) Socket timeout in seconds for communicating with a memcached
-# server. (integer value)
-#memcache_pool_socket_timeout = 3
-
-# (Optional) Number of seconds a connection to memcached is held unused in the
-# pool before it is closed. (integer value)
-#memcache_pool_unused_timeout = 60
-
-# (Optional) Number of seconds that an operation will wait to get a memcached
-# client connection from the pool. (integer value)
-#memcache_pool_conn_get_timeout = 10
-
-# (Optional) Use the advanced (eventlet safe) memcached client pool. The
-# advanced pool will only work under Python 2.x. (boolean value)
-#memcache_use_advanced_pool = false
-
-# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
-# middleware will not ask for service catalog on token validation and will not
-# set the X-Service-Catalog header. (boolean value)
-#include_service_catalog = true
-
-# Used to control the use and type of token binding. Can be set to: "disabled"
-# to not check token binding. "permissive" (default) to validate binding
-# information if the bind type is of a form known to the server and ignore it if
-# not. "strict" like "permissive" but if the bind type is unknown the token will
-# be rejected. "required" to require any form of token binding. Finally, the
-# name of a binding method that must be present in tokens. (string value)
-#enforce_token_bind = permissive
-
-# DEPRECATED: If true, the revocation list will be checked for cached tokens.
-# This requires that PKI tokens are configured on the identity server. (boolean
-# value)
-# This option is deprecated for removal since Ocata.
-# Its value may be silently ignored in the future.
-# Reason: PKI token format is no longer supported.
-#check_revocations_for_cached = false
-
-# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a
-# single algorithm or multiple. The algorithms are those supported by Python
-# standard hashlib.new(). The hashes will be tried in the order given, so put
-# the preferred one first for performance. The result of the first hash will be
-# stored in the cache. This will typically be set to multiple values only while
-# migrating from a less secure algorithm to a more secure one. Once all the old
-# tokens are expired this option should be set to a single value for better
-# performance. (list value)
-# This option is deprecated for removal since Ocata.
-# Its value may be silently ignored in the future.
-# Reason: PKI token format is no longer supported.
-#hash_algorithms = md5
-
-# A choice of roles that must be present in a service token. Service tokens are
-# allowed to request that an expired token can be used, so this check should
-# tightly ensure that only actual services send this token. Roles here are
-# applied as an ANY check, so at least one role in this list must be present.
-# For backwards compatibility reasons this currently only affects the
-# allow_expired check. (list value)
-#service_token_roles = service
-
-# For backwards compatibility reasons, we must let service tokens that fail the
-# service_token_roles check pass as valid. Setting this to true will become the
-# default in a future release and should be enabled if possible.
-# (boolean value)
-#service_token_roles_required = false
-
-# Authentication type to load (string value)
-# Deprecated group/name - [keystone_authtoken]/auth_plugin
-#auth_type =
-
-# Config Section from which to load plugin-specific options (string value)
-#auth_section =
-
-
-[paste_deploy]
-
-#
-# From glance.glare
-#
-
-#
-# Deployment flavor to use in the server application pipeline.
-#
-# Provide a string value representing the appropriate deployment
-# flavor used in the server application pipeline. This is typically
-# the partial name of a pipeline in the paste configuration file with
-# the service name removed.
-#
-# For example, if your paste section name in the paste configuration
-# file is [pipeline:glance-api-keystone], set ``flavor`` to
-# ``keystone``.
-#
-# Possible values:
-# * String value representing a partial pipeline name.
-#
-# Related Options:
-# * config_file
-#
-# (string value)
-#flavor = keystone
-
-#
-# Name of the paste configuration file.
-#
-# Provide a string value representing the name of the paste
-# configuration file to use for configuring pipelines for
-# server application deployments.
-#
-# NOTES:
-# * Provide the name or the path relative to the glance directory
-# for the paste configuration file and not the absolute path.
-# * The sample paste configuration file shipped with Glance need
-# not be edited in most cases as it comes with ready-made
-# pipelines for all common deployment flavors.
-#
-# If no value is specified for this option, the ``paste.ini`` file
-# with the prefix of the corresponding Glance service's configuration
-# file name will be searched for in the known configuration
-# directories. (For example, if this option is missing from or has no
-# value set in ``glance-api.conf``, the service will look for a file
-# named ``glance-api-paste.ini``.) If the paste configuration file is
-# not found, the service will not start.
-#
-# Possible values:
-# * A string value representing the name of the paste configuration
-# file.
-#
-# Related Options:
-# * flavor
-#
-# (string value)
-#config_file = glance-api-paste.ini
-
-
-[profiler]
-
-#
-# From glance.glare
-#
-
-#
-# Enables profiling for all services on this node. Default value is False
-# (the profiling feature is fully disabled).
-#
-# Possible values:
-#
-# * True: Enables the feature
-# * False: Disables the feature. Profiling cannot be started via this project's
-# operations. If profiling is triggered by another project, this project's
-# part of the trace will be empty.
-# (boolean value)
-# Deprecated group/name - [profiler]/profiler_enabled
-#enabled = false
-
-#
-# Enables SQL requests profiling in services. Default value is False (SQL
-# requests won't be traced).
-#
-# Possible values:
-#
-# * True: Enables SQL requests profiling. Each SQL query will be part of the
-# trace and can then be analyzed by how much time was spent on it.
-# * False: Disables SQL requests profiling. The spent time is only shown on a
-# higher level of operations. Single SQL queries cannot be analyzed this
-# way.
-# (boolean value)
-#trace_sqlalchemy = false
-
-#
-# Secret key(s) to use for encrypting context data for performance profiling.
-# This string value should have the following format: <key1>[,<key2>,...<keyn>],
-# where each key is some random string. A user who triggers the profiling via
-# the REST API has to set one of these keys in the headers of the REST API call
-# to include profiling results of this node for this particular project.
-#
-# Both the "enabled" flag and the "hmac_keys" config option should be set to
-# enable profiling. Also, to generate correct profiling information across all
-# services, at least one key needs to be consistent between OpenStack projects.
-# This ensures it can be used from the client side to generate a trace
-# containing information from all possible resources. (string value)
-#hmac_keys = SECRET_KEY
-
-#
-# Connection string for a notifier backend. Default value is messaging:// which
-# sets the notifier to oslo_messaging.
-#
-# Examples of possible values:
-#
-# * messaging://: use oslo_messaging driver for sending notifications.
-# * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications.
-# * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending
-# notifications.
-# (string value)
-#connection_string = messaging://
-
-#
-# Document type for notification indexing in elasticsearch.
-# (string value)
-#es_doc_type = notification
-
-#
-# This parameter is a time value parameter (for example: es_scroll_time=2m),
-# indicating for how long the nodes that participate in the search will maintain
-# relevant resources in order to continue and support it.
-# (string value)
-#es_scroll_time = 2m
-
-#
-# Elasticsearch splits large requests into batches. This parameter defines the
-# maximum size of each batch (for example: es_scroll_size=10000).
-# (integer value)
-#es_scroll_size = 10000
-
-#
-# Redis Sentinel provides a timeout option on the connections.
-# This parameter defines that timeout (for example: socket_timeout=0.1).
-# (floating point value)
-#socket_timeout = 0.1
-
-#
-# Redis Sentinel uses a service name to identify a master Redis service.
-# This parameter defines the name (for example:
-# sentinel_service_name=mymaster).
-# (string value)
-#sentinel_service_name = mymaster
diff --git a/etc/oslo-config-generator/glance-glare.conf b/etc/oslo-config-generator/glance-glare.conf
deleted file mode 100644
index eb6cd89a01..0000000000
--- a/etc/oslo-config-generator/glance-glare.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[DEFAULT]
-wrap_width = 80
-output_file = etc/glance-glare.conf.sample
-namespace = glance.glare
-namespace = glance.store
-namespace = oslo.db
-namespace = oslo.db.concurrency
-namespace = keystonemiddleware.auth_token
-namespace = oslo.log
-namespace = oslo.middleware.cors
diff --git a/glance/api/glare/__init__.py b/glance/api/glare/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/glance/api/glare/v0_1/__init__.py b/glance/api/glare/v0_1/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/glance/api/glare/v0_1/glare.py b/glance/api/glare/v0_1/glare.py
deleted file mode 100644
index 4609cf812a..0000000000
--- a/glance/api/glare/v0_1/glare.py
+++ /dev/null
@@ -1,922 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -import glance_store -import jsonschema -from oslo_config import cfg -from oslo_serialization import jsonutils as json -from oslo_utils import encodeutils -from oslo_utils import excutils -import semantic_version -import six -import six.moves.urllib.parse as urlparse -import webob.exc - -from glance.common import exception -from glance.common.glare import loader -from glance.common.glare import serialization -from glance.common import jsonpatchvalidator -from glance.common import utils -from glance.common import wsgi -import glance.db -from glance.glare import gateway -from glance.glare import Showlevel -from glance.i18n import _, _LE -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -CONF = cfg.CONF -CONF.import_group("profiler", "glance.common.wsgi") - - -class ArtifactsController(object): - def __init__(self, db_api=None, store_api=None, plugins=None): - self.db_api = db_api or glance.db.get_api() - self.store_api = store_api or glance_store - self.plugins = plugins or loader.ArtifactsPluginLoader( - 'glance.artifacts.types') - self.gateway = gateway.Gateway(self.db_api, - self.store_api, self.plugins) - - @staticmethod - def _do_update_op(artifact, change): - """Call corresponding method of the updater proxy. - - Here 'change' is a typical jsonpatch request dict: - * 'path' - a json-pointer string; - * 'op' - one of the allowed operation types; - * 'value' - value to set (omitted when op = remove) - """ - update_op = getattr(artifact, change['op']) - update_op(change['path'], change.get('value')) - return artifact - - @staticmethod - def _get_artifact_with_dependencies(repo, art_id, - type_name=None, type_version=None): - """Retrieves an artifact with dependencies from db by its id. - - Show level is direct (only direct dependencies are shown). 
- """ - return repo.get(art_id, show_level=Showlevel.DIRECT, - type_name=type_name, type_version=type_version) - - def show(self, req, type_name, type_version, - show_level=Showlevel.TRANSITIVE, **kwargs): - """Retrieves one artifact by id with its dependencies""" - artifact_repo = self.gateway.get_artifact_repo(req.context) - try: - art_id = kwargs.get('id') - artifact = artifact_repo.get(art_id, type_name=type_name, - type_version=type_version, - show_level=show_level) - return artifact - except exception.ArtifactNotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - - def list(self, req, type_name, type_version, state, **kwargs): - """Retrieves a list of artifacts that match some params""" - artifact_repo = self.gateway.get_artifact_repo(req.context) - filters = kwargs.pop('filters', {}) - - filters.update(type_name={'value': type_name}, - state={'value': state}) - if type_version is not None: - filters['type_version'] = {'value': type_version} - if 'version' in filters: - for filter in filters['version']: - if filter['value'] == 'latest': - if 'name' not in filters: - raise webob.exc.HTTPBadRequest( - 'Filtering by latest version without specifying' - ' a name is not supported.') - filter['value'] = self._get_latest_version( - req, filters['name'][0]['value'], type_name, - type_version) - else: - try: - semantic_version.Version(filter['value'], partial=True) - except ValueError: - msg = (_('The format of the version %s is not valid. ' - 'Use semver notation') % filter['value']) - raise webob.exc.HTTPBadRequest(explanation=msg) - - res = artifact_repo.list(filters=filters, - show_level=Showlevel.BASIC, - **kwargs) - result = {'artifacts': res} - limit = kwargs.get("limit") - if limit is not None and len(res) != 0 and len(res) == limit: - result['next_marker'] = res[-1].id - return result - - def _get_latest_version(self, req, name, type_name, type_version=None, - state='creating'): - artifact_repo = self.gateway.get_artifact_repo(req.context) - filters = dict(name=[{"value": name}], - type_name={"value": type_name}, - state={"value": state}) - if type_version is not None: - filters["type_version"] = {"value": type_version} - result = artifact_repo.list(filters=filters, - show_level=Showlevel.NONE, - sort_keys=[('version', None)]) - if len(result): - return result[0].version - - msg = "No artifacts have been found" - raise exception.ArtifactNotFound(message=msg) - - @utils.mutating - def create(self, req, artifact_type, artifact_data, **kwargs): - try: - artifact_factory = self.gateway.get_artifact_type_factory( - req.context, artifact_type) - new_artifact = artifact_factory.new_artifact(**artifact_data) - artifact_repo = self.gateway.get_artifact_repo(req.context) - artifact_repo.add(new_artifact) - # retrieve artifact from db - return self._get_artifact_with_dependencies(artifact_repo, - new_artifact.id) - except (TypeError, - exception.ArtifactNotFound, - exception.Invalid, - exception.DuplicateLocation) as e: - raise webob.exc.HTTPBadRequest(explanation=e) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.LimitExceeded as e: - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=e.msg, request=req, content_type='text/plain') - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - @utils.mutating - def update_property(self, req, id, type_name, type_version, path, data, - **kwargs): - 
"""Updates a single property specified by request url.""" - artifact_repo = self.gateway.get_artifact_repo(req.context) - try: - artifact = self._get_artifact_with_dependencies(artifact_repo, id, - type_name, - type_version) - self._ensure_write_access(artifact, req.context) - if artifact.metadata.attributes.blobs.get(path) is not None: - msg = _('Invalid Content-Type for work with %s') % path - raise webob.exc.HTTPBadRequest(explanation=msg) - - # use updater mixin to perform updates: generate update path - if req.method == "PUT": - # replaces existing value or creates a new one - if getattr(artifact, kwargs["attr"]): - artifact.replace(path=path, value=data) - else: - artifact.add(path=path, value=data) - else: - # append to an existing value or create a new one - artifact.add(path=path, value=data) - artifact_repo.save(artifact) - return self._get_artifact_with_dependencies(artifact_repo, id) - except (exception.InvalidArtifactPropertyValue, - exception.ArtifactInvalidProperty, - exception.InvalidJsonPatchPath, - exception.ArtifactCircularDependency) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - @utils.mutating - def update(self, req, id, type_name, type_version, changes, **kwargs): - """Performs an update via json patch request""" - artifact_repo = self.gateway.get_artifact_repo(req.context) - try: - artifact = self._get_artifact_with_dependencies(artifact_repo, id, - type_name, - type_version) - self._ensure_write_access(artifact, req.context) - updated = artifact - for change in changes: - if artifact.metadata.attributes.blobs.get(change['path']): - msg = _('Invalid request PATCH for work with blob') - raise webob.exc.HTTPBadRequest(explanation=msg) - else: - updated = self._do_update_op(updated, change) - artifact_repo.save(updated) - return self._get_artifact_with_dependencies(artifact_repo, id) - except (exception.InvalidJsonPatchPath, - exception.Invalid) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.StorageQuotaFull as e: - msg = (_("Denying attempt to upload artifact because it exceeds " - "the quota: %s") % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=msg, request=req, content_type='text/plain') - except exception.LimitExceeded as e: - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=e.msg, request=req, content_type='text/plain') - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - @utils.mutating - def delete(self, req, id, type_name, type_version, **kwargs): - artifact_repo = self.gateway.get_artifact_repo(req.context) - try: - artifact = self._get_artifact_with_dependencies( - artifact_repo, id, type_name=type_name, - type_version=type_version) - self._ensure_write_access(artifact, req.context) - artifact_repo.remove(artifact) - except exception.Invalid as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except (glance_store.Forbidden, exception.Forbidden) as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except (glance_store.NotFound, exception.NotFound) as e: - msg = (_("Failed to find artifact %(artifact_id)s to delete") % - {'artifact_id': id}) - raise 
webob.exc.HTTPNotFound(explanation=msg) - except glance_store.exceptions.InUseByStore as e: - msg = (_("Artifact %s could not be deleted " - "because it is in use: %s") % (id, e.msg)) # noqa - raise webob.exc.HTTPConflict(explanation=msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - @utils.mutating - def publish(self, req, id, type_name, type_version, **kwargs): - artifact_repo = self.gateway.get_artifact_repo(req.context) - try: - artifact = self._get_artifact_with_dependencies( - artifact_repo, id, type_name=type_name, - type_version=type_version) - self._ensure_write_access(artifact, req.context) - return artifact_repo.publish(artifact, context=req.context) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Invalid as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - def _upload_list_property(self, method, blob_list, index, data, size): - if method == 'PUT' and not index and len(blob_list) > 0: - # PUT replaces everything, so PUT to non-empty collection is - # forbidden - raise webob.exc.HTTPMethodNotAllowed( - explanation=_("Unable to PUT to non-empty collection")) - if index is not None and index > len(blob_list): - raise webob.exc.HTTPBadRequest( - explanation=_("Index is out of range")) - if index is None: - # both POST and PUT create a new blob list - blob_list.append((data, size)) - elif method == 'POST': - blob_list.insert(index, (data, size)) - else: - blob_list[index] = (data, size) - - @utils.mutating - def upload(self, req, id, type_name, type_version, attr, size, data, - index, **kwargs): - artifact_repo = self.gateway.get_artifact_repo(req.context) - artifact = None - try: - artifact = self._get_artifact_with_dependencies(artifact_repo, - id, - type_name, - type_version) - self._ensure_write_access(artifact, req.context) - blob_prop = artifact.metadata.attributes.blobs.get(attr) - if blob_prop is None: - raise webob.exc.HTTPBadRequest( - explanation=_("Not a blob property '%s'") % attr) - if isinstance(blob_prop, list): - blob_list = getattr(artifact, attr) - self._upload_list_property(req.method, blob_list, - index, data, size) - else: - if index is not None: - raise webob.exc.HTTPBadRequest( - explanation=_("Not a list property '%s'") % attr) - setattr(artifact, attr, (data, size)) - artifact_repo.save(artifact) - return artifact - - except ValueError as e: - exc_message = encodeutils.exception_to_unicode(e) - LOG.debug("Cannot save data for artifact %(id)s: %(e)s", - {'id': id, 'e': exc_message}) - self._restore(artifact_repo, artifact) - raise webob.exc.HTTPBadRequest( - explanation=exc_message) - - except glance_store.StoreAddDisabled: - msg = _("Error in store configuration. 
Adding artifacts to store " - "is disabled.") - LOG.exception(msg) - self._restore(artifact_repo, artifact) - raise webob.exc.HTTPGone(explanation=msg, request=req, - content_type='text/plain') - - except (glance_store.Duplicate, - exception.InvalidImageStatusTransition) as e: - LOG.exception(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPConflict(explanation=e.msg, request=req) - - except exception.Forbidden as e: - msg = ("Not allowed to upload data for artifact %s" % - id) - LOG.debug(msg) - raise webob.exc.HTTPForbidden(explanation=msg, request=req) - - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - - except glance_store.StorageFull as e: - msg = _("Artifact storage media " - "is full: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(artifact_repo, artifact) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except exception.StorageQuotaFull as e: - msg = _("Artifact exceeds the storage " - "quota: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(artifact_repo, artifact) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except exception.ImageSizeLimitExceeded as e: - msg = _("The incoming artifact blob is " - "too large: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(artifact_repo, artifact) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except glance_store.StorageWriteDenied as e: - msg = _("Insufficient permissions on artifact " - "storage media: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(artifact_repo, artifact) - raise webob.exc.HTTPServiceUnavailable(explanation=msg, - request=req) - - except webob.exc.HTTPGone as e: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to upload artifact blob data due to" - " HTTP error")) - - except webob.exc.HTTPError as e: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to upload artifact blob data due to HTTP" - " error")) - self._restore(artifact_repo, artifact) - - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Failed to upload artifact blob data due to " - "internal error")) - self._restore(artifact_repo, artifact) - - def download(self, req, id, type_name, type_version, attr, index, - **kwargs): - artifact_repo = self.gateway.get_artifact_repo(req.context) - try: - artifact = artifact_repo.get(id, type_name, type_version) - if attr in artifact.metadata.attributes.blobs: - if isinstance(artifact.metadata.attributes.blobs[attr], list): - if index is None: - raise webob.exc.HTTPBadRequest( - explanation=_("Index is required")) - blob_list = getattr(artifact, attr) - try: - return blob_list[index] - except IndexError as e: - raise webob.exc.HTTPBadRequest(explanation=e.message) - else: - if index is not None: - raise webob.exc.HTTPBadRequest(_("Not a list " - "property")) - return getattr(artifact, attr) - else: - message = _("Not a downloadable entity") - raise webob.exc.HTTPBadRequest(explanation=message) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except (glance_store.NotFound, exception.NotFound) as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Invalid as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - def _restore(self, artifact_repo, artifact): - """Restore the artifact to queued status. 
- - :param artifact_repo: The instance of ArtifactRepo - :param artifact: The artifact will be restored - """ - try: - if artifact_repo and artifact: - artifact.state = 'creating' - artifact_repo.save(artifact) - except Exception as e: - msg = (_LE("Unable to restore artifact %(artifact_id)s: %(e)s") % - {'artifact_id': artifact.id, - 'e': encodeutils.exception_to_unicode(e)}) - LOG.exception(msg) - - def list_artifact_types(self, req): - plugins = self.plugins.plugin_map - response = [] - base_link = "%s/v0.1/artifacts" % (CONF.public_endpoint or - req.host_url) - - for type_name, plugin in six.iteritems(plugins.get("by_typename")): - - metadata = dict( - type_name=type_name, - displayed_name=plugin[0].metadata.type_display_name, - versions=[] - ) - - for version in plugin: - endpoint = version.metadata.endpoint - type_version = "v" + version.metadata.type_version - version_metadata = dict( - id=type_version, - link="%s/%s/%s" % (base_link, endpoint, type_version) - ) - type_description = version.metadata.type_description - if type_description is not None: - version_metadata['description'] = type_description - metadata['versions'].append(version_metadata) - response.append(metadata) - - return {"artifact_types": response} - - @staticmethod - def _ensure_write_access(artifact, context): - if context.is_admin: - return - if context.owner is None or context.owner != artifact.owner: - raise exception.ArtifactForbidden(id=artifact.id) - - -class RequestDeserializer(wsgi.JSONRequestDeserializer, - jsonpatchvalidator.JsonPatchValidatorMixin): - _available_sort_keys = ('name', 'status', 'container_format', - 'disk_format', 'size', 'id', 'created_at', - 'updated_at', 'version') - _default_sort_dir = 'desc' - - _max_limit_number = 1000 - - def __init__(self, schema=None, plugins=None): - super(RequestDeserializer, self).__init__( - methods_allowed=["replace", "remove", "add"]) - self.plugins = plugins or loader.ArtifactsPluginLoader( - 'glance.artifacts.types') - - def _validate_show_level(self, show_level): - try: - return Showlevel.from_str(show_level.strip().lower()) - except exception.ArtifactUnsupportedShowLevel as e: - raise webob.exc.HTTPBadRequest(explanation=e.message) - - def show(self, req): - res = self._process_type_from_request(req, True) - params = req.params.copy() - show_level = params.pop('show_level', None) - if show_level is not None: - res['show_level'] = self._validate_show_level(show_level) - return res - - def _get_request_body(self, req): - output = super(RequestDeserializer, self).default(req) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - def validate_body(self, request): - try: - body = self._get_request_body(request) - return super(RequestDeserializer, self).validate_body(body) - except exception.JsonPatchException as e: - raise webob.exc.HTTPBadRequest(explanation=e) - - def default(self, request): - return self._process_type_from_request(request) - - def _check_type_version(self, type_version): - try: - semantic_version.Version(type_version, partial=True) - except ValueError as e: - raise webob.exc.HTTPBadRequest(explanation=e) - - def _process_type_from_request(self, req, - allow_implicit_version=False): - try: - type_name = req.urlvars.get('type_name') - type_version = req.urlvars.get('type_version') - if type_version is not None: - self._check_type_version(type_version) - # Even if the type_version is not specified and - # 'allow_implicit_version' is False, this call is 
still needed to - # ensure that at least one version of this type exists. - artifact_type = self.plugins.get_class_by_endpoint(type_name, - type_version) - res = { - 'type_name': artifact_type.metadata.type_name, - 'type_version': - artifact_type.metadata.type_version - if type_version is not None else None - } - if allow_implicit_version: - res['artifact_type'] = artifact_type - return res - - except exception.ArtifactPluginNotFound as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - def _validate_headers(self, req, content_type='application/json'): - header = req.headers.get('Content-Type') - if header != content_type: - msg = _('Invalid headers "Content-Type": %s') % header - raise webob.exc.HTTPBadRequest(explanation=msg) - - def create(self, req): - self._validate_headers(req) - res = self._process_type_from_request(req, True) - res["artifact_data"] = self._get_request_body(req) - return res - - def update(self, req): - self._validate_headers(req) - res = self._process_type_from_request(req) - res["changes"] = self.validate_body(req) - return res - - def update_property(self, req): - self._validate_headers(req) - """Data is expected in form {'data': ...}""" - res = self._process_type_from_request(req) - data_schema = { - "type": "object", - "properties": {"data": {}}, - "required": ["data"], - "$schema": "http://json-schema.org/draft-04/schema#"} - try: - json_body = json.loads(req.body) - jsonschema.validate(json_body, data_schema) - # TODO(ivasilevskaya): - # by now the deepest nesting level == 1 (ex. some_list/3), - # has to be fixed for dict properties - attr = req.urlvars["attr"] - path_left = req.urlvars["path_left"] - path = (attr if not path_left - else "%(attr)s/%(path_left)s" % {'attr': attr, - 'path_left': path_left}) - res.update(data=json_body["data"], path=path) - return res - except (ValueError, jsonschema.ValidationError) as e: - msg = _("Invalid json body: %s") % e.message - raise webob.exc.HTTPBadRequest(explanation=msg) - - def upload(self, req): - self._validate_headers(req, content_type='application/octet-stream') - res = self._process_type_from_request(req) - index = req.urlvars.get('path_left') - try: - # for blobs only one level of indexing is supported - # (ex. bloblist/0) - if index is not None: - index = int(index) - except ValueError: - msg = _("Only list indexes are allowed for blob lists") - raise webob.exc.HTTPBadRequest(explanation=msg) - artifact_size = req.content_length or None - res.update(size=artifact_size, data=req.body_file, - index=index) - return res - - def download(self, req): - res = self._process_type_from_request(req) - index = req.urlvars.get('index') - if index is not None: - index = int(index) - res.update(index=index) - return res - - def _validate_limit(self, limit): - if limit is None: - return self._max_limit_number - try: - limit = int(limit) - except ValueError: - msg = _("Limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _("Limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit > self._max_limit_number: - msg = _("Limit param" - " must not be higher than %d") % self._max_limit_number - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - def _validate_sort_key(self, sort_key, artifact_type, type_version=None): - if sort_key in self._available_sort_keys: - return sort_key, None - elif type_version is None: - msg = (_('Invalid sort key: %(sort_key)s. 
' - 'If type version is not set it must be one of' - ' the following: %(available)s.') % - {'sort_key': sort_key, - 'available': ', '.join(self._available_sort_keys)}) - raise webob.exc.HTTPBadRequest(explanation=msg) - prop_type = artifact_type.metadata.attributes.all.get(sort_key) - if prop_type is None or prop_type.DB_TYPE not in ['string', - 'numeric', - 'int', - 'bool']: - msg = (_('Invalid sort key: %(sort_key)s. ' - 'You cannot sort by this property') % - {'sort_key': sort_key}) - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_key, prop_type.DB_TYPE - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _get_sorting_params(self, params, artifact_type, type_version=None): - - sort_keys = [] - sort_dirs = [] - - if 'sort' in params: - for sort_param in params.pop('sort').strip().split(','): - key, _sep, dir = sort_param.partition(':') - if not dir: - dir = self._default_sort_dir - sort_keys.append(self._validate_sort_key(key.strip(), - artifact_type, - type_version)) - sort_dirs.append(self._validate_sort_dir(dir.strip())) - - if not sort_keys: - sort_keys = [('created_at', None)] - if not sort_dirs: - sort_dirs = [self._default_sort_dir] - - return sort_keys, sort_dirs - - def _bring_to_type(self, type_name, value): - mapper = {'int': int, - 'string': str, - 'text': str, - 'bool': bool, - 'numeric': float} - return mapper[type_name](value) - - def _get_filters(self, artifact_type, params): - error_msg = 'Unexpected filter property' - filters = dict() - for filter, raw_value in params.items(): - - # first, get the comparison operator - left, sep, right = raw_value.strip().partition(':') - if not sep: - op = "default" - value = left.strip() - else: - op = left.strip().upper() - value = right.strip() - - # then, understand what's the property to filter and its value - if '.' in filter: # Indicates a dict-valued property with a key - prop_name, key = filter.split('.', 1) - else: - prop_name = filter - key = None - prop_type = artifact_type.metadata.attributes.all.get(prop_name) - if prop_type is None: - raise webob.exc.HTTPBadRequest(error_msg) - key_only_check = False - position = None - if isinstance(prop_type, dict): - if key is None: - key = value - val = None - key_only_check = True - else: - val = value - - if isinstance(prop_type.properties, dict): - # This one is to handle the case of composite dict, having - # different types of values at different keys, i.e. object - prop_type = prop_type.properties.get(key) - if prop_type is None: - raise webob.exc.HTTPBadRequest(error_msg) - else: - prop_type = prop_type.properties - - property_name = prop_name + '.' 
+ key - property_value = val - else: - if key is not None: - raise webob.exc.HTTPBadRequest(error_msg) - property_name = prop_name - property_value = value - - # now detect the value DB type - if prop_type.DB_TYPE is not None: - str_type = prop_type.DB_TYPE - elif isinstance(prop_type, list): - if not isinstance(prop_type.item_type, list): - position = "any" - str_type = prop_type.item_type.DB_TYPE - else: - raise webob.exc.HTTPBadRequest('Filtering by tuple-like' - ' fields is not supported') - else: - raise webob.exc.HTTPBadRequest(error_msg) - if property_value is not None: - property_value = self._bring_to_type(str_type, property_value) - - # convert the default operation to NE, EQ or IN - if key_only_check: - if op == 'default': - op = 'NE' - else: - raise webob.exc.HTTPBadRequest('Comparison not supported ' - 'for key-only filtering') - else: - if op == 'default': - op = 'IN' if isinstance(prop_type, list) else 'EQ' - - filters.setdefault(property_name, []) - - filters[property_name].append(dict(operator=op, position=position, - value=property_value, - type=str_type)) - return filters - - def list(self, req): - res = self._process_type_from_request(req, True) - params = req.params.copy() - show_level = params.pop('show_level', None) - if show_level is not None: - res['show_level'] = self._validate_show_level(show_level.strip()) - - limit = params.pop('limit', None) - marker = params.pop('marker', None) - - query_params = dict() - - query_params['sort_keys'], query_params['sort_dirs'] = ( - self._get_sorting_params(params, res['artifact_type'], - res['type_version'])) - - if marker is not None: - query_params['marker'] = marker - - query_params['limit'] = self._validate_limit(limit) - - query_params['filters'] = self._get_filters(res['artifact_type'], - params) - - query_params['type_name'] = res['artifact_type'].metadata.type_name - - return query_params - - def list_artifact_types(self, req): - return {} - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - # TODO(ivasilevskaya): ideally this should be autogenerated/loaded - ARTIFACTS_ENDPOINT = '/v0.1/artifacts' - fields = ['id', 'name', 'version', 'type_name', 'type_version', - 'visibility', 'state', 'owner', 'scope', 'created_at', - 'updated_at', 'tags', 'dependencies', 'blobs', 'properties'] - - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - - def default(self, response, res): - artifact = serialization.serialize_for_client( - res, show_level=Showlevel.DIRECT) - body = json.dumps(artifact, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def create(self, response, artifact): - response.status_int = 201 - self.default(response, artifact) - response.location = ( - '%(root_url)s/%(type_name)s/v%(type_version)s/%(id)s' % dict( - root_url=ResponseSerializer.ARTIFACTS_ENDPOINT, - type_name=artifact.metadata.endpoint, - type_version=artifact.metadata.type_version, - id=artifact.id)) - - def list(self, response, res): - params = dict(response.request.params) - params.pop('marker', None) - query = urlparse.urlencode(params) - type_name = response.request.urlvars.get('type_name') - type_version = response.request.urlvars.get('type_version') - if response.request.urlvars.get('state') == 'creating': - drafts = "/drafts" - else: - drafts = "" - - artifacts_list = [ - serialization.serialize_for_client(a, show_level=Showlevel.NONE) - for a in res['artifacts']] - url = "/v0.1/artifacts" - if type_name: - url += "/" + type_name - if 
type_version: - url += "/v" + type_version - url += drafts - if query: - first_url = url + "?" + query - else: - first_url = url - body = { - "artifacts": artifacts_list, - "first": first_url - } - if 'next_marker' in res: - params['marker'] = res['next_marker'] - next_query = urlparse.urlencode(params) - body['next'] = url + '?' + next_query - content = json.dumps(body, ensure_ascii=False) - response.unicode_body = six.text_type(content) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = 204 - - def download(self, response, blob): - response.headers['Content-Type'] = 'application/octet-stream' - response.app_iter = iter(blob.data_stream) - if blob.checksum: - response.headers['Content-MD5'] = blob.checksum - response.headers['Content-Length'] = str(blob.size) - - def list_artifact_types(self, response, res): - body = json.dumps(res, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - -def create_resource(): - """Images resource factory method""" - plugins = loader.ArtifactsPluginLoader('glance.artifacts.types') - deserializer = RequestDeserializer(plugins=plugins) - serializer = ResponseSerializer() - controller = ArtifactsController(plugins=plugins) - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/glare/v0_1/router.py b/glance/api/glare/v0_1/router.py deleted file mode 100644 index 1e36ef4e1e..0000000000 --- a/glance/api/glare/v0_1/router.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.api.glare.v0_1 import glare -from glance.common import wsgi - - -UUID_REGEX = ( - R'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}') - - -class API(wsgi.Router): - - def _get_artifacts_resource(self): - if not self.artifacts_resource: - self.artifacts_resource = glare.create_resource() - return self.artifacts_resource - - def __init__(self, mapper): - self.artifacts_resource = None - artifacts_resource = self._get_artifacts_resource() - reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) - - def _check_json_content_type(environ, result): - return "application/json" in environ["CONTENT_TYPE"] - - def _check_octet_stream_content_type(environ, result): - return "application/octet-stream" in environ["CONTENT_TYPE"] - - def connect_routes(m, read_only): - with m.submapper(resource_name="artifact_operations", - path_prefix="/{id}", - requirements={'id': UUID_REGEX}) as art: - art.show() - if not read_only: - art.delete() - art.action('update', method='PATCH') - art.link('publish', method='POST') - - def connect_attr_action(attr): - if not read_only: - attr.action("upload", conditions={ - 'method': ["POST", "PUT"], - 'function': _check_octet_stream_content_type}) - attr.action("update_property", - conditions={ - 'method': ["POST", "PUT"], - 'function': _check_json_content_type}) - attr.link("download", method="GET") - - attr_map = art.submapper(resource_name="attr_operations", - path_prefix="/{attr}", path_left=None) - attr_items = art.submapper( - resource_name="attr_item_ops", - path_prefix="/{attr}/{path_left:.*}") - connect_attr_action(attr_map) - connect_attr_action(attr_items) - - m.connect("", action='list', conditions={'method': 'GET'}, - state='active') - m.connect("/drafts", action='list', conditions={'method': 'GET'}, - state='creating') - if not read_only: - m.connect("/drafts", action='create', - conditions={'method': 'POST'}) - - mapper.connect('/artifacts', - controller=artifacts_resource, - action='list_artifact_types', - conditions={'method': ['GET']}) - - versioned = mapper.submapper(path_prefix='/artifacts/{type_name}/' - 'v{type_version}', - controller=artifacts_resource) - - non_versioned = mapper.submapper(path_prefix='/artifacts/{type_name}', - type_version=None, - controller=artifacts_resource) - connect_routes(versioned, False) - connect_routes(non_versioned, True) - - mapper.connect('/artifacts', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - super(API, self).__init__(mapper) diff --git a/glance/api/glare/versions.py b/glance/api/glare/versions.py deleted file mode 100644 index 3f89596fed..0000000000 --- a/glance/api/glare/versions.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
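
The router deleted below leans on Python Routes "requirements" to reject malformed artifact ids before a request ever reaches the controller. A minimal standalone sketch of that mechanism, using the same version-4 UUID pattern (the route path and action name here are illustrative only):

    import routes

    UUID_REGEX = (
        r'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-'
        r'[89ab][0-9a-f]{3}-[0-9a-f]{12}')

    mapper = routes.Mapper()
    # {id} only matches a well-formed version-4 UUID
    mapper.connect('/artifacts/{id}', action='show',
                   requirements={'id': UUID_REGEX})

    print(mapper.match('/artifacts/8c7a9c11-6b10-4d5e-9a2e-0a3b4c5d6e7f'))
    # -> {'action': 'show', 'id': '8c7a9c11-6b10-4d5e-9a2e-0a3b4c5d6e7f'}
    print(mapper.match('/artifacts/not-a-uuid'))
    # -> None
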
- -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client -import webob.dec - -from glance.common import wsgi -from glance import i18n - -_ = i18n._ - -versions_opts = [ - - # Note: Since both glance-api and glare-api have the same name for the - # option public_endpoint, oslo.config generator throws a DuplicateError - # exception during the conf file generation incase of differing help - # texts. Hence we have to have identical help texts for glance-api and - # glare-api's public_endpoint if not for changing the conf opt name. - - cfg.StrOpt('public_endpoint', - help=_(""" -Public url endpoint to use for Glance/Glare versions response. - -This is the public url endpoint that will appear in the Glance/Glare -"versions" response. If no value is specified, the endpoint that is -displayed in the version's response is that of the host running the -API service. Change the endpoint to represent the proxy URL if the -API service is running behind a proxy. If the service is running -behind a load balancer, add the load balancer's URL for this value. - -Possible values: - * None - * Proxy URL - * Load balancer URL - -Related options: - * None - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(versions_opts) - - -class Controller(object): - - """A wsgi controller that reports which API versions are supported.""" - - def index(self, req, explicit=False): - """Respond to a request for all OpenStack API versions.""" - def build_version_object(version, path, status): - url = CONF.public_endpoint or req.host_url - return { - 'id': 'v%s' % version, - 'status': status, - 'links': [ - { - 'rel': 'self', - 'href': '%s/%s/' % (url, path), - }, - ], - } - - version_objs = [build_version_object(0.1, 'v0.1', 'EXPERIMENTAL')] - status = explicit and http_client.OK or http_client.MULTIPLE_CHOICES - response = webob.Response(request=req, - status=status, - content_type='application/json') - response.body = jsonutils.dump_as_bytes(dict(versions=version_objs)) - return response - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - return self.index(req) - - -def create_resource(conf): - return wsgi.Resource(Controller()) diff --git a/glance/api/middleware/version_negotiation.py b/glance/api/middleware/version_negotiation.py index 62a26c08ce..d36a4a91d5 100644 --- a/glance/api/middleware/version_negotiation.py +++ b/glance/api/middleware/version_negotiation.py @@ -22,7 +22,6 @@ return from oslo_config import cfg from oslo_log import log as logging -from glance.api.glare import versions as artifacts_versions from glance.api import versions from glance.common import wsgi @@ -119,15 +118,3 @@ class VersionNegotiationFilter(wsgi.Middleware): r = path[:idx] req.path_info = path[idx:] return r - - -class GlareVersionNegotiationFilter(VersionNegotiationFilter): - def __init__(self, app): - super(GlareVersionNegotiationFilter, self).__init__(app) - self.versions_app = artifacts_versions.Controller() - self.vnd_mime_type = 'application/vnd.openstack.artifacts-' - - def _get_allowed_versions(self): - return { - 'v0.1': 0.1 - } diff --git a/glance/api/versions.py b/glance/api/versions.py index 7546b3f51d..8bcc9fd2fe 100644 --- a/glance/api/versions.py +++ b/glance/api/versions.py @@ -24,18 +24,11 @@ from glance.i18n import _, _LW versions_opts = [ - - # Note: Since both glance-api and glare-api have the same name for the - # option public_endpoint, oslo.config generator throws a DuplicateError - # exception during the conf file generation incase of 
differing help - # texts. Hence we have to have identical help texts for glance-api and - # glare-api's public_endpoint if not for changing the conf opt name. - cfg.StrOpt('public_endpoint', help=_(""" -Public url endpoint to use for Glance/Glare versions response. +Public url endpoint to use for Glance versions response. -This is the public url endpoint that will appear in the Glance/Glare +This is the public url endpoint that will appear in the Glance "versions" response. If no value is specified, the endpoint that is displayed in the version's response is that of the host running the API service. Change the endpoint to represent the proxy URL if the diff --git a/glance/cmd/glare.py b/glance/cmd/glare.py deleted file mode 100644 index 5abc4420ee..0000000000 --- a/glance/cmd/glare.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -Glare (Glance Artifact Repository) API service -""" - -import sys - -import eventlet -from oslo_utils import encodeutils - - -eventlet.patcher.monkey_patch(all=False, socket=True, time=True, - select=True, thread=True, os=True) - -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging -import osprofiler.notifier -import osprofiler.web - -from glance.common import config -from glance.common import exception -from glance.common import wsgi -from glance import notifier - - -CONF = cfg.CONF -CONF.import_group("profiler", "glance.common.wsgi") -logging.register_options(CONF) - -KNOWN_EXCEPTIONS = (RuntimeError, - exception.WorkerCreationFailure, - glance_store.exceptions.BadStoreConfiguration) - - -def fail(e): - global KNOWN_EXCEPTIONS - return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 - sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e)) - sys.exit(return_code) - - -def main(): - try: - config.parse_args() - wsgi.set_eventlet_hub() - logging.setup(CONF, 'glare') - - if cfg.CONF.profiler.enabled: - _notifier = osprofiler.notifier.create("Messaging", - oslo_messaging, {}, - notifier.get_transport(), - "glance", "artifacts", - cfg.CONF.bind_host) - osprofiler.notifier.set(_notifier) - else: - osprofiler.web.disable() - - server = wsgi.Server(initialize_glance_store=True) - server.start(config.load_paste_app('glare-api'), default_port=9494) - server.wait() - except KNOWN_EXCEPTIONS as e: - fail(e) - - -if __name__ == '__main__': - main() diff --git a/glance/common/exception.py b/glance/common/exception.py index 8176543231..7c6ab7cff2 100644 --- a/glance/common/exception.py +++ b/glance/common/exception.py @@ -452,101 +452,6 @@ class MetadefTagNotFound(NotFound): " namespace=%(namespace_name)s.") -class InvalidVersion(Invalid): - message = _("Version is invalid: %(reason)s") - - -class InvalidArtifactTypePropertyDefinition(Invalid): - message 
= _("Invalid property definition") - - -class InvalidArtifactTypeDefinition(Invalid): - message = _("Invalid type definition") - - -class InvalidArtifactPropertyValue(Invalid): - message = _("Property '%(name)s' may not have value '%(val)s': %(msg)s") - - def __init__(self, message=None, *args, **kwargs): - super(InvalidArtifactPropertyValue, self).__init__(message, *args, - **kwargs) - self.name = kwargs.get('name') - self.value = kwargs.get('val') - - -class ArtifactNotFound(NotFound): - message = _("Artifact with id=%(id)s was not found") - - -class ArtifactForbidden(Forbidden): - message = _("Artifact with id=%(id)s is not accessible") - - -class ArtifactDuplicateNameTypeVersion(Duplicate): - message = _("Artifact with the specified type, name and version" - " already exists") - - -class InvalidArtifactStateTransition(Invalid): - message = _("Artifact cannot change state from %(source)s to %(target)s") - - -class ArtifactDuplicateDirectDependency(Duplicate): - message = _("Artifact with the specified type, name and version" - " already has the direct dependency=%(dep)s") - - -class ArtifactDuplicateTransitiveDependency(Duplicate): - message = _("Artifact with the specified type, name and version" - " already has the transitive dependency=%(dep)s") - - -class ArtifactCircularDependency(Invalid): - message = _("Artifact with a circular dependency can not be created") - - -class ArtifactUnsupportedPropertyOperator(Invalid): - message = _("Operator %(op)s is not supported") - - -class ArtifactUnsupportedShowLevel(Invalid): - message = _("Show level %(shl)s is not supported in this operation") - - -class ArtifactPropertyValueNotFound(NotFound): - message = _("Property's %(prop)s value has not been found") - - -class ArtifactInvalidProperty(Invalid): - message = _("Artifact has no property %(prop)s") - - -class ArtifactInvalidPropertyParameter(Invalid): - message = _("Cannot use this parameter with the operator %(op)s") - - -class ArtifactLoadError(GlanceException): - message = _("Cannot load artifact '%(name)s'") - - -class ArtifactNonMatchingTypeName(ArtifactLoadError): - message = _("Plugin name '%(plugin)s' should match " - "artifact typename '%(name)s'") - - -class ArtifactPluginNotFound(NotFound): - message = _("No plugin for '%(name)s' has been loaded") - - -class UnknownArtifactType(NotFound): - message = _("Artifact type with name '%(name)s' and version '%(version)s' " - "is not known") - - -class ArtifactInvalidStateTransition(Invalid): - message = _("Artifact state cannot be changed from %(curr)s to %(to)s") - - class JsonPatchException(GlanceException): message = _("Invalid jsonpatch request") diff --git a/glance/common/glare/__init__.py b/glance/common/glare/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/common/glare/declarative.py b/glance/common/glare/declarative.py deleted file mode 100644 index 4b400d7e0f..0000000000 --- a/glance/common/glare/declarative.py +++ /dev/null @@ -1,743 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import re - -import semantic_version -import six - -from glance.common import exception as exc -from glance.i18n import _ - - -class AttributeDefinition(object): - """A base class for the attribute definitions which may be added to - declaratively defined artifact types - """ - - ALLOWED_TYPES = (object,) - - def __init__(self, - display_name=None, - description=None, - readonly=False, - mutable=True, - required=False, - default=None): - """Initializes attribute definition - - :param display_name: Display name of the attribute - :param description: Description of the attribute - :param readonly: Flag indicating if the value of attribute may not be - changed once an artifact is created - :param mutable: Flag indicating if the value of attribute may not be - changed once an artifact is published - :param required: Flag indicating if the value of attribute is required - :param default: default value of the attribute - """ - self.name = None - self.display_name = display_name - self.description = description - self.readonly = readonly - self.required = required - self.mutable = mutable - self.default = default - self._add_validator('type', - lambda v: isinstance(v, self.ALLOWED_TYPES), - _("Not a valid value type")) - self._validate_default() - - def _set_name(self, value): - self.name = value - if self.display_name is None: - self.display_name = value - - def _add_validator(self, name, func, message): - if not hasattr(self, '_validators'): - self._validators = [] - self._validators_index = {} - pair = (func, message) - self._validators.append(pair) - self._validators_index[name] = pair - - def _get_validator(self, name): - return self._validators_index.get(name) - - def _remove_validator(self, name): - pair = self._validators_index.pop(name, None) - if pair is not None: - self._validators.remove(pair) - - def _check_definition(self): - self._validate_default() - - def _validate_default(self): - if self.default: - try: - self.validate(self.default, 'default') - except exc.InvalidArtifactPropertyValue: - raise exc.InvalidArtifactTypePropertyDefinition( - _("Default value is invalid")) - - def get_value(self, obj): - return getattr(obj, self.name) - - def set_value(self, obj, value): - return setattr(obj, self.name, value) - - def validate(self, value, name=None): - if value is None: - if self.required: - raise exc.InvalidArtifactPropertyValue( - name=name or self.name, - val=value, - msg=_('Value is required')) - else: - return - - first_error = next((msg for v_func, msg in self._validators - if not v_func(value)), None) - if first_error: - raise exc.InvalidArtifactPropertyValue(name=name or self.name, - val=value, - msg=first_error) - - -class ListAttributeDefinition(AttributeDefinition): - """A base class for Attribute definitions having List-semantics - - Is inherited by Array, ArtifactReferenceList and BinaryObjectList - """ - ALLOWED_TYPES = (list,) - ALLOWED_ITEM_TYPES = (AttributeDefinition, ) - - def _check_item_type(self, item): - if not isinstance(item, self.ALLOWED_ITEM_TYPES): - raise exc.InvalidArtifactTypePropertyDefinition( - _('Invalid item type specification')) - if item.default is not None: - raise exc.InvalidArtifactTypePropertyDefinition( - _('List definitions may hot have defaults')) - - def __init__(self, item_type, min_size=0, max_size=None, unique=False, - **kwargs): - - super(ListAttributeDefinition, self).__init__(**kwargs) - if isinstance(item_type, 
list): - for it in item_type: - self._check_item_type(it) - - # we need to copy the item_type collection - self.item_type = item_type[:] - - if min_size != 0: - raise exc.InvalidArtifactTypePropertyDefinition( - _("Cannot specify 'min_size' explicitly") - ) - - if max_size is not None: - raise exc.InvalidArtifactTypePropertyDefinition( - _("Cannot specify 'max_size' explicitly") - ) - - # setting max_size and min_size to the length of item_type, - # as tuple-semantic assumes that the number of elements is set - # by the type spec - min_size = max_size = len(item_type) - else: - self._check_item_type(item_type) - self.item_type = item_type - - if min_size: - self.min_size(min_size) - - if max_size: - self.max_size(max_size) - - if unique: - self.unique() - - def min_size(self, value): - self._min_size = value - if value is not None: - self._add_validator('min_size', - lambda v: len(v) >= self._min_size, - _('List size is less than minimum')) - else: - self._remove_validator('min_size') - - def max_size(self, value): - self._max_size = value - if value is not None: - self._add_validator('max_size', - lambda v: len(v) <= self._max_size, - _('List size is greater than maximum')) - else: - self._remove_validator('max_size') - - def unique(self, value=True): - self._unique = value - if value: - def _unique(items): - seen = set() - for item in items: - if item in seen: - return False - seen.add(item) - return True - self._add_validator('unique', - _unique, _('Items have to be unique')) - else: - self._remove_validator('unique') - - def _set_name(self, value): - super(ListAttributeDefinition, self)._set_name(value) - if isinstance(self.item_type, list): - for i, item in enumerate(self.item_type): - item._set_name("%s[%i]" % (value, i)) - else: - self.item_type._set_name("%s[*]" % value) - - def validate(self, value, name=None): - super(ListAttributeDefinition, self).validate(value, name) - if value is not None: - for i, item in enumerate(value): - self._validate_item_at(item, i) - - def get_item_definition_at_index(self, index): - if isinstance(self.item_type, list): - if index < len(self.item_type): - return self.item_type[index] - else: - return None - return self.item_type - - def _validate_item_at(self, item, index): - item_type = self.get_item_definition_at_index(index) - # set name if none has been given to the list element at given index - if (isinstance(self.item_type, list) and item_type and - not item_type.name): - item_type.name = "%s[%i]" % (self.name, index) - if item_type: - item_type.validate(item) - - -class DictAttributeDefinition(AttributeDefinition): - """A base class for Attribute definitions having Map-semantics - - Is inherited by Dict - """ - ALLOWED_TYPES = (dict,) - ALLOWED_PROPERTY_TYPES = (AttributeDefinition,) - - def _check_prop(self, key, item): - if (not isinstance(item, self.ALLOWED_PROPERTY_TYPES) or - (key is not None and not isinstance(key, six.string_types))): - raise exc.InvalidArtifactTypePropertyDefinition( - _('Invalid dict property type specification')) - - @staticmethod - def _validate_key(key): - if not isinstance(key, six.string_types): - raise exc.InvalidArtifactPropertyValue( - _('Invalid dict property type')) - - def __init__(self, properties, min_properties=0, max_properties=0, - **kwargs): - super(DictAttributeDefinition, self).__init__(**kwargs) - if isinstance(properties, dict): - for key, value in six.iteritems(properties): - self._check_prop(key, value) - # copy the properties dict - self.properties = properties.copy() - - 
self._add_validator('keys', - lambda v: set(v.keys()) <= set( - self.properties.keys()), - _('Dictionary contains unexpected key(s)')) - else: - self._check_prop(None, properties) - self.properties = properties - - if min_properties: - self.min_properties(min_properties) - - if max_properties: - self.max_properties(max_properties) - - def min_properties(self, value): - self._min_properties = value - if value is not None: - self._add_validator('min_properties', - lambda v: len(v) >= self._min_properties, - _('Dictionary size is less than ' - 'minimum')) - else: - self._remove_validator('min_properties') - - def max_properties(self, value): - self._max_properties = value - if value is not None: - self._add_validator('max_properties', - lambda v: len(v) <= self._max_properties, - _('Dictionary size is ' - 'greater than maximum')) - else: - self._remove_validator('max_properties') - - def _set_name(self, value): - super(DictAttributeDefinition, self)._set_name(value) - if isinstance(self.properties, dict): - for k, v in six.iteritems(self.properties): - v._set_name(value) - else: - self.properties._set_name(value) - - def validate(self, value, name=None): - super(DictAttributeDefinition, self).validate(value, name) - if value is not None: - for k, v in six.iteritems(value): - self._validate_item_with_key(v, k) - - def _validate_item_with_key(self, value, key): - self._validate_key(key) - if isinstance(self.properties, dict): - prop_def = self.properties.get(key) - if prop_def is not None: - name = "%s[%s]" % (prop_def.name, key) - prop_def.validate(value, name=name) - else: - name = "%s[%s]" % (self.properties.name, key) - self.properties.validate(value, name=name) - - def get_prop_definition_at_key(self, key): - if isinstance(self.properties, dict): - return self.properties.get(key) - else: - return self.properties - - -class PropertyDefinition(AttributeDefinition): - """A base class for Attributes defining generic or type-specific metadata - properties - """ - DB_TYPE = None - - def __init__(self, - internal=False, - allowed_values=None, - validators=None, - **kwargs): - """Defines a metadata property - - :param internal: a flag indicating that the property is internal, i.e. 
- not returned to client - :param allowed_values: specifies a list of values allowed for the - property - :param validators: specifies a list of custom validators for the - property - """ - super(PropertyDefinition, self).__init__(**kwargs) - self.internal = internal - self._allowed_values = None - - if validators is not None: - try: - for i, (f, m) in enumerate(validators): - self._add_validator("custom_%i" % i, f, m) - except ValueError: - raise exc.InvalidArtifactTypePropertyDefinition( - _("Custom validators list should contain tuples " - "'(function, message)'")) - - if allowed_values is not None: - # copy the allowed_values, as this is going to create a - # closure, and we need to make sure that external modification of - # this list does not affect the created validator - self.allowed_values(allowed_values) - self._check_definition() - - def _validate_allowed_values(self): - if self._allowed_values: - try: - for allowed_value in self._allowed_values: - self.validate(allowed_value, 'allowed_value') - except exc.InvalidArtifactPropertyValue: - raise exc.InvalidArtifactTypePropertyDefinition( - _("Allowed values %s are invalid under given validators") % - self._allowed_values) - - def allowed_values(self, values): - self._allowed_values = values[:] - if values is not None: - self._add_validator('allowed', lambda v: v in self._allowed_values, - _("Is not allowed value")) - else: - self._remove_validator('allowed') - self._check_definition() - - def _check_definition(self): - self._validate_allowed_values() - super(PropertyDefinition, self)._check_definition() - - -class RelationDefinition(AttributeDefinition): - """A base class for Attributes defining cross-artifact relations""" - def __init__(self, internal=False, **kwargs): - self.internal = internal - kwargs.setdefault('mutable', False) - # if mutable=True has been passed -> raise an exception - if kwargs['mutable'] is True: - raise exc.InvalidArtifactTypePropertyDefinition( - _("Dependency relations cannot be mutable")) - super(RelationDefinition, self).__init__(**kwargs) - - -class BlobDefinition(AttributeDefinition): - """A base class for Attributes defining binary objects""" - pass - - -class ArtifactTypeMetaclass(type): - """A metaclass to build Artifact Types. 
Not intended to be used directly - - Use `get_declarative_base` to get the base class instead - """ - def __init__(cls, class_name, bases, attributes): - if '_declarative_artifact_type' not in cls.__dict__: - _build_declarative_meta(cls) - super(ArtifactTypeMetaclass, cls).__init__(class_name, bases, - attributes) - - -class ArtifactPropertyDescriptor(object): - """A descriptor object for working with artifact attributes""" - - def __init__(self, prop, collection_wrapper_class=None): - self.prop = prop - self.collection_wrapper_class = collection_wrapper_class - - def __get__(self, instance, owner): - if instance is None: - # accessed via owner class - return self.prop - else: - v = getattr(instance, '_' + self.prop.name, None) - if v is None and self.prop.default is not None: - v = copy.copy(self.prop.default) - self.__set__(instance, v, ignore_mutability=True) - return self.__get__(instance, owner) - else: - if v is not None and self.collection_wrapper_class: - if self.prop.readonly: - readonly = True - elif (not self.prop.mutable and - hasattr(instance, '__is_mutable__') and - not hasattr(instance, - '__suspend_mutability_checks__')): - - readonly = not instance.__is_mutable__() - else: - readonly = False - if readonly: - v = v.__make_immutable__() - return v - - def __set__(self, instance, value, ignore_mutability=False): - if instance: - if self.prop.readonly: - if hasattr(instance, '_' + self.prop.name): - raise exc.InvalidArtifactPropertyValue( - _('Attempt to set readonly property')) - if not self.prop.mutable: - if (hasattr(instance, '__is_mutable__') and - not hasattr(instance, - '__suspend_mutability_checks__')): - mutable = instance.__is_mutable__() or ignore_mutability - if not mutable: - raise exc.InvalidArtifactPropertyValue( - _('Attempt to set value of immutable property')) - if value is not None and self.collection_wrapper_class: - value = self.collection_wrapper_class(value) - value.property = self.prop - self.prop.validate(value) - setattr(instance, '_' + self.prop.name, value) - - -class ArtifactAttributes(object): - """A container class storing description of Artifact Type attributes""" - def __init__(self): - self.properties = {} - self.dependencies = {} - self.blobs = {} - self.all = {} - - @property - def default_dependency(self): - """Returns the default dependency relation for an artifact type""" - if len(self.dependencies) == 1: - return list(self.dependencies.values())[0] - - @property - def default_blob(self): - """Returns the default blob object for an artifact type""" - if len(self.blobs) == 1: - return list(self.blobs.values())[0] - - @property - def default_properties_dict(self): - """Returns a default properties dict for an artifact type""" - dict_props = [v for v in self.properties.values() if - isinstance(v, DictAttributeDefinition)] - if len(dict_props) == 1: - return dict_props[0] - - @property - def tags(self): - """Returns tags property for an artifact type""" - return self.properties.get('tags') - - def add(self, attribute): - self.all[attribute.name] = attribute - if isinstance(attribute, PropertyDefinition): - self.properties[attribute.name] = attribute - elif isinstance(attribute, BlobDefinition): - self.blobs[attribute.name] = attribute - elif isinstance(attribute, RelationDefinition): - self.dependencies[attribute.name] = attribute - - -class ArtifactTypeMetadata(object): - """A container to store the meta-information about an artifact type""" - - def __init__(self, type_name, type_display_name, type_version, - type_description, endpoint): - 
"""Initializes the Artifact Type metadata - - :param type_name: name of the artifact type - :param type_display_name: display name of the artifact type - :param type_version: version of the artifact type - :param type_description: description of the artifact type - :param endpoint: REST API URI suffix to call the artifacts of this type - """ - - self.attributes = ArtifactAttributes() - - # These are going to be defined by third-party plugin - # developers, so we need to do some validations on these values and - # raise InvalidArtifactTypeDefinition if they are violated - self.type_name = type_name - self.type_display_name = type_display_name or type_name - self.type_version = type_version or '1.0' - self.type_description = type_description - self.endpoint = endpoint or type_name.lower() - - self._validate_string(self.type_name, 'Type name', min_length=1, - max_length=255) - self._validate_string(self.type_display_name, 'Type display name', - max_length=255) - self._validate_string(self.type_description, 'Type description') - self._validate_string(self.endpoint, 'endpoint', min_length=1) - try: - semantic_version.Version(self.type_version, partial=True) - except ValueError: - raise exc.InvalidArtifactTypeDefinition( - message=_("Type version has to be a valid semver string")) - - @staticmethod - def _validate_string(value, name, min_length=0, max_length=None, - pattern=None): - if value is None: - if min_length > 0: - raise exc.InvalidArtifactTypeDefinition( - message=_("%(attribute)s is required"), attribute=name) - else: - return - if not isinstance(value, six.string_types): - raise exc.InvalidArtifactTypeDefinition( - message=_("%(attribute)s have to be string"), attribute=name) - if max_length and len(value) > max_length: - raise exc.InvalidArtifactTypeDefinition( - message=_("%(attribute)s may not be longer than %(length)i"), - attribute=name, length=max_length) - if min_length and len(value) < min_length: - raise exc.InvalidArtifactTypeDefinition( - message=_("%(attribute)s may not be shorter than %(length)i"), - attribute=name, length=min_length) - if pattern and not re.match(pattern, value): - raise exc.InvalidArtifactTypeDefinition( - message=_("%(attribute)s should match pattern %(pattern)s"), - attribute=name, pattern=pattern.pattern) - - -def _build_declarative_meta(cls): - attrs = dict(cls.__dict__) - type_name = None - type_display_name = None - type_version = None - type_description = None - endpoint = None - - for base in cls.__mro__: - for name, value in six.iteritems(vars(base)): - if name == '__type_name__': - if not type_name: - type_name = cls.__type_name__ - elif name == '__type_version__': - if not type_version: - type_version = cls.__type_version__ - elif name == '__type_description__': - if not type_description: - type_description = cls.__type_description__ - elif name == '__endpoint__': - if not endpoint: - endpoint = cls.__endpoint__ - elif name == '__type_display_name__': - if not type_display_name: - type_display_name = cls.__type_display_name__ - elif base is not cls and name not in attrs: - if isinstance(value, AttributeDefinition): - attrs[name] = value - elif isinstance(value, ArtifactPropertyDescriptor): - attrs[name] = value.prop - - meta = ArtifactTypeMetadata(type_name=type_name or cls.__name__, - type_display_name=type_display_name, - type_version=type_version, - type_description=type_description, - endpoint=endpoint) - setattr(cls, 'metadata', meta) - for k, v in attrs.items(): - if k == 'metadata': - raise exc.InvalidArtifactTypePropertyDefinition( - 
_("Cannot declare artifact property with reserved name " - "'metadata'")) - if isinstance(v, AttributeDefinition): - v._set_name(k) - wrapper_class = None - if isinstance(v, ListAttributeDefinition): - wrapper_class = type("ValidatedList", (list,), {}) - _add_validation_to_list(wrapper_class) - if isinstance(v, DictAttributeDefinition): - wrapper_class = type("ValidatedDict", (dict,), {}) - _add_validation_to_dict(wrapper_class) - prop_descr = ArtifactPropertyDescriptor(v, wrapper_class) - setattr(cls, k, prop_descr) - meta.attributes.add(v) - - -def _validating_method(method, klass): - def wrapper(self, *args, **kwargs): - instance_copy = klass(self) - method(instance_copy, *args, **kwargs) - self.property.validate(instance_copy) - method(self, *args, **kwargs) - - return wrapper - - -def _immutable_method(method): - def substitution(*args, **kwargs): - raise exc.InvalidArtifactPropertyValue( - _("Unable to modify collection in " - "immutable or readonly property")) - - return substitution - - -def _add_immutable_wrappers(class_to_add, wrapped_methods): - for method_name in wrapped_methods: - method = getattr(class_to_add, method_name, None) - if method: - setattr(class_to_add, method_name, _immutable_method(method)) - - -def _add_validation_wrappers(class_to_validate, base_class, validated_methods): - for method_name in validated_methods: - method = getattr(class_to_validate, method_name, None) - if method: - setattr(class_to_validate, method_name, - _validating_method(method, base_class)) - readonly_class = type("Readonly" + class_to_validate.__name__, - (class_to_validate,), {}) - _add_immutable_wrappers(readonly_class, validated_methods) - - def __make_immutable__(self): - return readonly_class(self) - - class_to_validate.__make_immutable__ = __make_immutable__ - - -def _add_validation_to_list(list_based_class): - validated_methods = ['append', 'extend', 'insert', 'pop', 'remove', - 'reverse', 'sort', '__setitem__', '__delitem__', - '__delslice__'] - _add_validation_wrappers(list_based_class, list, validated_methods) - - -def _add_validation_to_dict(dict_based_class): - validated_methods = ['pop', 'popitem', 'setdefault', 'update', - '__delitem__', '__setitem__', 'clear'] - _add_validation_wrappers(dict_based_class, dict, validated_methods) - - -def _kwarg_init_constructor(self, **kwargs): - self.__suspend_mutability_checks__ = True - try: - for k in kwargs: - if not hasattr(type(self), k): - raise exc.ArtifactInvalidProperty(prop=k) - setattr(self, k, kwargs[k]) - self._validate_required(self.metadata.attributes.properties) - finally: - del self.__suspend_mutability_checks__ - - -def _validate_required(self, attribute_dict): - for k, v in six.iteritems(attribute_dict): - if v.required and (not hasattr(self, k) or getattr(self, k) is None): - raise exc.InvalidArtifactPropertyValue(name=k, val=None, - msg=_('Value is required')) - - -def _update(self, values): - for k in values: - if hasattr(type(self), k): - setattr(self, k, values[k]) - else: - raise exc.ArtifactInvalidProperty(prop=k) - - -def _pre_publish_validator(self, *args, **kwargs): - self._validate_required(self.metadata.attributes.blobs) - self._validate_required(self.metadata.attributes.dependencies) - - -_kwarg_init_constructor.__name__ = '__init__' -_pre_publish_validator.__name__ = '__pre_publish__' -_update.__name__ = 'update' - - -def get_declarative_base(name='base', base_class=object): - """Returns a base class which should be inherited to construct Artifact - Type object using the declarative syntax of attribute 
definition - """ - bases = not isinstance(base_class, tuple) and (base_class,) or base_class - class_dict = {'__init__': _kwarg_init_constructor, - '_validate_required': _validate_required, - '__pre_publish__': _pre_publish_validator, - '_declarative_artifact_type': True, - 'update': _update} - return ArtifactTypeMetaclass(name, bases, class_dict) diff --git a/glance/common/glare/definitions.py b/glance/common/glare/definitions.py deleted file mode 100644 index cda5993c24..0000000000 --- a/glance/common/glare/definitions.py +++ /dev/null @@ -1,571 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import numbers -import re - -import semantic_version -import six - -import glance.common.exception as exc -from glance.common.glare import declarative -from glance.i18n import _ - - -class Text(declarative.PropertyDefinition): - """A text metadata property of arbitrary length - - Maps to TEXT columns in database, does not support sorting or filtering - """ - ALLOWED_TYPES = (six.string_types,) - DB_TYPE = 'text' - - -# noinspection PyAttributeOutsideInit -class String(Text): - """A string metadata property of limited length - - Maps to VARCHAR columns in database, supports filtering and sorting. - May have constrains on length and regexp patterns. - - The maximum length is limited to 255 characters - """ - - DB_TYPE = 'string' - - def __init__(self, max_length=255, min_length=0, pattern=None, **kwargs): - """Defines a String metadata property. 
- - :param max_length: maximum value length - :param min_length: minimum value length - :param pattern: regexp pattern to match - """ - super(String, self).__init__(**kwargs) - - self.max_length(max_length) - self.min_length(min_length) - if pattern: - self.pattern(pattern) - # if default and/or allowed_values are specified (in base classes) - # then we need to validate them against the newly added validators - self._check_definition() - - def max_length(self, value): - """Sets the maximum value length""" - self._max_length = value - if value is not None: - if value > 255: - raise exc.InvalidArtifactTypePropertyDefinition( - _('Max string length may not exceed 255 characters')) - self._add_validator('max_length', - lambda v: len(v) <= self._max_length, - _('Length is greater than maximum')) - else: - self._remove_validator('max_length') - self._check_definition() - - def min_length(self, value): - """Sets the minimum value length""" - self._min_length = value - if value is not None: - if value < 0: - raise exc.InvalidArtifactTypePropertyDefinition( - _('Min string length may not be negative')) - - self._add_validator('min_length', - lambda v: len(v) >= self._min_length, - _('Length is less than minimum')) - else: - self._remove_validator('min_length') - self._check_definition() - - def pattern(self, value): - """Sets the regexp pattern to match""" - self._pattern = value - if value is not None: - self._add_validator('pattern', - lambda v: re.match(self._pattern, - v) is not None, - _('Does not match pattern')) - else: - self._remove_validator('pattern') - self._check_definition() - - -class SemVerString(String): - """A String metadata property matching semver pattern""" - - def __init__(self, **kwargs): - def validate(value): - try: - semantic_version.Version(value, partial=True) - except ValueError: - return False - return True - - super(SemVerString, - self).__init__(validators=[(validate, - "Invalid semver string")], - **kwargs) - - -# noinspection PyAttributeOutsideInit -class Integer(declarative.PropertyDefinition): - """An Integer metadata property - - Maps to INT columns in Database, supports filtering and sorting. - May have constraints on value - """ - - ALLOWED_TYPES = (six.integer_types,) - DB_TYPE = 'int' - - def __init__(self, min_value=None, max_value=None, **kwargs): - """Defines an Integer metadata property - - :param min_value: minimum allowed value - :param max_value: maximum allowed value - """ - super(Integer, self).__init__(**kwargs) - if min_value is not None: - self.min_value(min_value) - - if max_value is not None: - self.max_value(max_value) - - # if default and/or allowed_values are specified (in base classes) - # then we need to validate them against the newly added validators - self._check_definition() - - def min_value(self, value): - """Sets the minimum allowed value""" - self._min_value = value - if value is not None: - self._add_validator('min_value', - lambda v: v >= self._min_value, - _('Value is less than minimum')) - else: - self._remove_validator('min_value') - self._check_definition() - - def max_value(self, value): - """Sets the maximum allowed value""" - self._max_value = value - if value is not None: - self._add_validator('max_value', - lambda v: v <= self._max_value, - _('Value is greater than maximum')) - else: - self._remove_validator('max_value') - self._check_definition() - - -# noinspection PyAttributeOutsideInit -class DateTime(declarative.PropertyDefinition): - """A DateTime metadata property - - Maps to a DATETIME columns in database. 
- Is not supported as Type Specific property, may be used only as Generic one - - May have constraints on value - """ - ALLOWED_TYPES = (datetime.datetime,) - DB_TYPE = 'datetime' - - def __init__(self, min_value=None, max_value=None, **kwargs): - """Defines a DateTime metadata property - - :param min_value: minimum allowed value - :param max_value: maximum allowed value - """ - super(DateTime, self).__init__(**kwargs) - if min_value is not None: - self.min_value(min_value) - - if max_value is not None: - self.max_value(max_value) - - # if default and/or allowed_values are specified (in base classes) - # then we need to validate them against the newly added validators - self._check_definition() - - def min_value(self, value): - """Sets the minimum allowed value""" - self._min_value = value - if value is not None: - self._add_validator('min_value', - lambda v: v >= self._min_value, - _('Value is less than minimum')) - else: - self._remove_validator('min_value') - self._check_definition() - - def max_value(self, value): - """Sets the maximum allowed value""" - self._max_value = value - if value is not None: - self._add_validator('max_value', - lambda v: v <= self._max_value, - _('Value is greater than maximum')) - else: - self._remove_validator('max_value') - self._check_definition() - - -# noinspection PyAttributeOutsideInit -class Numeric(declarative.PropertyDefinition): - """A Numeric metadata property - - Maps to floating point number columns in Database, supports filtering and - sorting. May have constraints on value - """ - ALLOWED_TYPES = numbers.Number - DB_TYPE = 'numeric' - - def __init__(self, min_value=None, max_value=None, **kwargs): - """Defines a Numeric metadata property - - :param min_value: minimum allowed value - :param max_value: maximum allowed value - """ - super(Numeric, self).__init__(**kwargs) - if min_value is not None: - self.min_value(min_value) - - if max_value is not None: - self.max_value(max_value) - - # if default and/or allowed_values are specified (in base classes) - # then we need to validate them against the newly added validators - self._check_definition() - - def min_value(self, value): - """Sets the minimum allowed value""" - self._min_value = value - if value is not None: - self._add_validator('min_value', - lambda v: v >= self._min_value, - _('Value is less than minimum')) - else: - self._remove_validator('min_value') - self._check_definition() - - def max_value(self, value): - """Sets the maximum allowed value""" - self._max_value = value - if value is not None: - self._add_validator('max_value', - lambda v: v <= self._max_value, - _('Value is greater than maximum')) - else: - self._remove_validator('max_value') - self._check_definition() - - -class Boolean(declarative.PropertyDefinition): - """A Boolean metadata property - - Maps to Boolean columns in database. Supports filtering and sorting. - """ - ALLOWED_TYPES = (bool,) - DB_TYPE = 'bool' - - -class Array(declarative.ListAttributeDefinition, - declarative.PropertyDefinition, list): - """An array metadata property - - May contain elements of any other PropertyDefinition types except Dict and - Array. Each elements maps to appropriate type of columns in database. - Preserves order. Allows filtering based on "Array contains Value" semantics - - May specify constrains on types of elements, their amount and uniqueness. 
- """ - ALLOWED_ITEM_TYPES = (declarative.PropertyDefinition,) - - def __init__(self, item_type=String(), min_size=0, max_size=None, - unique=False, extra_items=True, **kwargs): - """Defines an Array metadata property - - :param item_type: defines the types of elements in Array. If set to an - instance of PropertyDefinition then all the elements have to be of that - type. If set to list of such instances, then the elements on the - corresponding positions have to be of the appropriate type. - :param min_size: minimum size of the Array - :param max_size: maximum size of the Array - :param unique: if set to true, all the elements in the Array have to be - unique - """ - if isinstance(item_type, Array): - msg = _("Array property can't have item_type=Array") - raise exc.InvalidArtifactTypePropertyDefinition(msg) - declarative.ListAttributeDefinition.__init__(self, - item_type=item_type, - min_size=min_size, - max_size=max_size, - unique=unique) - declarative.PropertyDefinition.__init__(self, **kwargs) - - -class Dict(declarative.DictAttributeDefinition, - declarative.PropertyDefinition, dict): - """A dictionary metadata property - - May contain elements of any other PropertyDefinition types except Dict. - Each elements maps to appropriate type of columns in database. Allows - filtering and sorting by values of each key except the ones mapping the - Text fields. - - May specify constrains on types of elements and their amount. - """ - ALLOWED_PROPERTY_TYPES = (declarative.PropertyDefinition,) - - def __init__(self, properties=String(), min_properties=0, - max_properties=None, **kwargs): - """Defines a dictionary metadata property - - :param properties: defines the types of dictionary values. If set to an - instance of PropertyDefinition then all the value have to be of that - type. If set to a dictionary with string keys and values of - PropertyDefinition type, then the elements mapped by the corresponding - have have to be of the appropriate type. - :param min_properties: minimum allowed amount of properties in the dict - :param max_properties: maximum allowed amount of properties in the dict - """ - declarative.DictAttributeDefinition.__init__( - self, - properties=properties, - min_properties=min_properties, - max_properties=max_properties) - declarative.PropertyDefinition.__init__(self, **kwargs) - - -class ArtifactType(declarative.get_declarative_base()): # noqa - """A base class for all the Artifact Type definitions - - Defines the Generic metadata properties as attributes. 
- """ - id = String(required=True, readonly=True) - type_name = String(required=True, readonly=True) - type_version = SemVerString(required=True, readonly=True) - name = String(required=True, mutable=False) - version = SemVerString(required=True, mutable=False) - description = Text() - tags = Array(unique=True, default=[]) - visibility = String(required=True, - allowed_values=["private", "public", "shared", - "community"], - default="private") - state = String(required=True, readonly=True, allowed_values=["creating", - "active", - "deactivated", - "deleted"]) - owner = String(required=True, readonly=True) - created_at = DateTime(required=True, readonly=True) - updated_at = DateTime(required=True, readonly=True) - published_at = DateTime(readonly=True) - deleted_at = DateTime(readonly=True) - - def __init__(self, **kwargs): - if "type_name" in kwargs: - raise exc.InvalidArtifactPropertyValue( - _("Unable to specify artifact type explicitly")) - if "type_version" in kwargs: - raise exc.InvalidArtifactPropertyValue( - _("Unable to specify artifact type version explicitly")) - super(ArtifactType, - self).__init__(type_name=self.metadata.type_name, - type_version=self.metadata.type_version, **kwargs) - - def __eq__(self, other): - if not isinstance(other, ArtifactType): - return False - return self.id == other.id - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash(self.id) - - def __is_mutable__(self): - return self.state == "creating" - - -class ArtifactReference(declarative.RelationDefinition): - """An artifact reference definition - - Allows to define constraints by the name and version of target artifact - """ - ALLOWED_TYPES = ArtifactType - - def __init__(self, type_name=None, type_version=None, **kwargs): - """Defines an artifact reference - - :param type_name: type name of the target artifact - :param type_version: type version of the target artifact - """ - super(ArtifactReference, self).__init__(**kwargs) - if type_name is not None: - if isinstance(type_name, list): - type_names = list(type_name) - if type_version is not None: - raise exc.InvalidArtifactTypePropertyDefinition( - _('Unable to specify version ' - 'if multiple types are possible')) - else: - type_names = [type_name] - - def validate_reference(artifact): - if artifact.type_name not in type_names: - return False - if (type_version is not None and - artifact.type_version != type_version): - return False - return True - - self._add_validator('referenced_type', - validate_reference, - _("Invalid referenced type")) - elif type_version is not None: - raise exc.InvalidArtifactTypePropertyDefinition( - _('Unable to specify version ' - 'if type is not specified')) - self._check_definition() - - -class ArtifactReferenceList(declarative.ListAttributeDefinition, - declarative.RelationDefinition, list): - """A list of Artifact References - - Allows to define a collection of references to other artifacts, each - optionally constrained by type name and type version - """ - ALLOWED_ITEM_TYPES = (ArtifactReference,) - - def __init__(self, references=ArtifactReference(), min_size=0, - max_size=None, **kwargs): - if isinstance(references, list): - raise exc.InvalidArtifactTypePropertyDefinition( - _("Invalid reference list specification")) - declarative.RelationDefinition.__init__(self, **kwargs) - declarative.ListAttributeDefinition.__init__(self, - item_type=references, - min_size=min_size, - max_size=max_size, - unique=True, - default=[] - if min_size == 0 else - None) - - -class 
Blob(object): - """A Binary object being part of the Artifact""" - def __init__(self, size=0, locations=None, checksum=None, item_key=None): - """Initializes a new Binary Object for an Artifact - - :param size: the size of Binary Data - :param locations: a list of data locations in backing stores - :param checksum: a checksum for the data - """ - if locations is None: - locations = [] - self.size = size - self.checksum = checksum - self.locations = locations - self.item_key = item_key - - def to_dict(self): - return { - "size": self.size, - "checksum": self.checksum, - } - - -class BinaryObject(declarative.BlobDefinition, Blob): - """A definition of BinaryObject binding - - Adds a BinaryObject to an Artifact Type, optionally constrained by file - size and amount of locations - """ - ALLOWED_TYPES = (Blob,) - - def __init__(self, - max_file_size=None, - min_file_size=None, - min_locations=None, - max_locations=None, - **kwargs): - """Defines a binary object as part of Artifact Type - :param max_file_size: maximum size of the associate Blob - :param min_file_size: minimum size of the associated Blob - :param min_locations: minimum number of locations in the associated - Blob - :param max_locations: maximum number of locations in the associated - Blob - """ - mutable = kwargs.pop('mutable', False) - if mutable: - raise exc.InvalidArtifactTypePropertyDefinition( - _("BinaryObject property cannot be declared mutable")) - super(BinaryObject, self).__init__(default=None, readonly=False, - mutable=mutable, **kwargs) - self._max_file_size = max_file_size - self._min_file_size = min_file_size - self._min_locations = min_locations - self._max_locations = max_locations - - self._add_validator('size_not_empty', - lambda v: v.size is not None, - _('Blob size is not set')) - if max_file_size: - self._add_validator('max_size', - lambda v: v.size <= self._max_file_size, - _("File too large")) - if min_file_size: - self._add_validator('min_size', - lambda v: v.size >= self._min_file_size, - _("File too small")) - if min_locations: - self._add_validator('min_locations', - lambda v: len( - v.locations) >= self._min_locations, - _("Too few locations")) - if max_locations: - self._add_validator( - 'max_locations', - lambda v: len(v.locations) <= self._max_locations, - _("Too many locations")) - - -class BinaryObjectList(declarative.ListAttributeDefinition, - declarative.BlobDefinition, list): - """A definition of binding to the list of BinaryObject - - Adds a list of BinaryObject's to an artifact type, optionally constrained - by the number of objects in the list and their uniqueness - - """ - ALLOWED_ITEM_TYPES = (BinaryObject,) - - def __init__(self, objects=BinaryObject(), min_count=0, max_count=None, - **kwargs): - declarative.BlobDefinition.__init__(self, **kwargs) - declarative.ListAttributeDefinition.__init__(self, - item_type=objects, - min_size=min_count, - max_size=max_count, - unique=True) - self.default = [] if min_count == 0 else None diff --git a/glance/common/glare/loader.py b/glance/common/glare/loader.py deleted file mode 100644 index 124b76f12b..0000000000 --- a/glance/common/glare/loader.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_config import cfg -import semantic_version -from stevedore import enabled - -from glance.common import exception -from glance.common.glare import definitions -from glance.i18n import _, _LE, _LI, _LW -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -plugins_opts = [ - cfg.BoolOpt('load_enabled', default=True, - help=_('When false, no artifacts can be loaded regardless of' - ' available_plugins. When true, artifacts can be' - ' loaded.')), - cfg.ListOpt('available_plugins', default=[], - help=_('A list of artifacts that are allowed in the' - ' format name or name-version. Empty list means that' - ' any artifact can be loaded.')) -] - - -CONF = cfg.CONF -CONF.register_opts(plugins_opts) - - -class ArtifactsPluginLoader(object): - def __init__(self, namespace, test_plugins=None): - self.mgr = test_plugins or enabled.EnabledExtensionManager( - check_func=self._gen_check_func(), - namespace=namespace, - propagate_map_exceptions=True, - on_load_failure_callback=self._on_load_failure) - self.plugin_map = {'by_typename': {}, - 'by_endpoint': {}} - - def _add_extension(ext): - """ - Plugins can be loaded as entry_point=single plugin and - entry_point=PLUGIN_LIST, where PLUGIN_LIST is a python variable - holding a list of plugins - """ - def _load_one(plugin): - if issubclass(plugin, definitions.ArtifactType): - # make sure that have correct plugin name - art_name = plugin.metadata.type_name - if art_name != ext.name: - raise exception.ArtifactNonMatchingTypeName( - name=art_name, plugin=ext.name) - # make sure that no plugin with the same name and version - # already exists - exists = self._get_plugins(ext.name) - new_tv = plugin.metadata.type_version - if any(e.metadata.type_version == new_tv for e in exists): - raise exception.ArtifactDuplicateNameTypeVersion() - self._add_plugin("by_endpoint", plugin.metadata.endpoint, - plugin) - self._add_plugin("by_typename", plugin.metadata.type_name, - plugin) - - if isinstance(ext.plugin, list): - for p in ext.plugin: - _load_one(p) - else: - _load_one(ext.plugin) - - # (ivasilevskaya) that looks pretty bad as RuntimeError is too general, - # but stevedore has awful exception wrapping with no specific class - # for this very case (no extensions for given namespace found) - try: - self.mgr.map(_add_extension) - except RuntimeError as re: - LOG.error(_LE("Unable to load artifacts: %s") % re.message) - - def _version(self, artifact): - return semantic_version.Version.coerce(artifact.metadata.type_version) - - def _add_plugin(self, spec, name, plugin): - """ - Inserts a new plugin into a sorted by desc type_version list - of existing plugins in order to retrieve the latest by next() - """ - def _add(name, value): - self.plugin_map[spec][name] = value - - old_order = copy.copy(self._get_plugins(name, spec=spec)) - for i, p in enumerate(old_order): - if self._version(p) < self._version(plugin): - _add(name, old_order[0:i] + [plugin] + old_order[i:]) - return - _add(name, old_order + [plugin]) - - def _get_plugins(self, name, spec="by_typename"): - if spec not in self.plugin_map.keys(): - return [] - 
return self.plugin_map[spec].get(name, []) - - def _gen_check_func(self): - """Generates the check_func for the EnabledExtensionManager""" - - def _all_forbidden(ext): - LOG.warn(_LW("Can't load artifact %s: load disabled in config") % - ext.name) - raise exception.ArtifactLoadError(name=ext.name) - - def _all_allowed(ext): - LOG.info( - _LI("Artifact %s has been successfully loaded"), ext.name) - return True - - if not CONF.load_enabled: - return _all_forbidden - if len(CONF.available_plugins) == 0: - return _all_allowed - - available = [] - for name in CONF.available_plugins: - type_name, version = (name.split('-', 1) - if '-' in name else (name, None)) - available.append((type_name, version)) - - def _check_ext(ext): - try: - next(n for n, v in available - if n == ext.plugin.metadata.type_name and - (v is None or v == ext.plugin.metadata.type_version)) - except StopIteration: - LOG.warn(_LW("Can't load artifact %s: not in" - " available_plugins list") % ext.name) - raise exception.ArtifactLoadError(name=ext.name) - LOG.info( - _LI("Artifact %s has been successfully loaded"), ext.name) - return True - - return _check_ext - - # this callback has to be registered explicitly: otherwise stevedore - # swallows the exception and logs something irrelevant instead of the - # expected error message - def _on_load_failure(self, manager, ep, exc): - msg = (_LE("Could not load plugin from %(module)s") % - {"module": ep.module_name}) - LOG.exception(msg) - raise exc - - def _find_class_in_collection(self, collection, name, version=None): - try: - def _cmp_version(plugin, version): - ver = semantic_version.Version.coerce - return (ver(plugin.metadata.type_version) == - ver(version)) - - if version: - return next((p for p in collection - if _cmp_version(p, version))) - return next((p for p in collection)) - except StopIteration: - raise exception.ArtifactPluginNotFound( - name="%s %s" % (name, "v %s" % version if version else "")) - - def get_class_by_endpoint(self, name, version=None): - if version is None: - classlist = self._get_plugins(name, spec="by_endpoint") - if not classlist: - raise exception.ArtifactPluginNotFound(name=name) - return self._find_class_in_collection(classlist, name) - return self._find_class_in_collection( - self._get_plugins(name, spec="by_endpoint"), name, version) - - def get_class_by_typename(self, name, version=None): - return self._find_class_in_collection( - self._get_plugins(name, spec="by_typename"), name, version) diff --git a/glance/common/glare/serialization.py b/glance/common/glare/serialization.py deleted file mode 100644 index e265feea7a..0000000000 --- a/glance/common/glare/serialization.py +++ /dev/null @@ -1,328 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
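The module below flattens declarative artifact objects into plain dicts keyed the way the artifact_* tables expect. A minimal sketch of the shape serialize_for_db returns, assuming a type with one custom string property (all names and values here are hypothetical, not taken from the patch):

    {
        'id': '<uuid>',
        'type_name': 'MyArtifact',   # plus the rest of the
        'type_version': '1.0',       # COMMON_ARTIFACT_PROPERTIES keys
        'properties': {
            'prop1': {'type': 'string', 'value': 'some value'},
        },
        'dependencies': {'depends_on': ['<dependency artifact id>']},
        'blobs': {'image_file': [{'size': 1024, 'locations': [],
                                  'checksum': None, 'item_key': None}]},
    }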
- -import collections - -import six - -from glance.common import exception -from glance.common.glare import declarative -from glance.common.glare import definitions -from glance import glare as ga -from glance.i18n import _ - - -COMMON_ARTIFACT_PROPERTIES = ['id', - 'type_name', - 'type_version', - 'name', - 'version', - 'description', - 'visibility', - 'state', - 'tags', - 'owner', - 'created_at', - 'updated_at', - 'published_at', - 'deleted_at'] - - -def _serialize_list_prop(prop, values): - """ - A helper func called to correctly serialize an Array property. - - Returns a dict {'type': some_supported_db_type, 'value': serialized_data} - """ - # FIXME(Due to a potential bug in declarative framework, for Arrays, that - # are values to some dict items (Dict(properties={"foo": Array()})), - # prop.get_value(artifact) returns not the real list of items, but the - # whole dict). So we can't rely on prop.get_value(artifact) and will pass - # correctly retrieved values to this function - serialized_value = [] - for i, val in enumerate(values or []): - db_type = prop.get_item_definition_at_index(i).DB_TYPE - if db_type is None: - continue - serialized_value.append({ - 'type': db_type, - 'value': val - }) - return serialized_value - - -def _serialize_dict_prop(artifact, prop, key, value, save_prop_func): - key_to_save = prop.name + '.' + key - dict_key_prop = prop.get_prop_definition_at_key(key) - db_type = dict_key_prop.DB_TYPE - if (db_type is None and - not isinstance(dict_key_prop, - declarative.ListAttributeDefinition)): - # nothing to do here, don't know how to deal with this type - return - elif isinstance(dict_key_prop, - declarative.ListAttributeDefinition): - serialized = _serialize_list_prop( - dict_key_prop, - # FIXME(see comment for _serialize_list_prop func) - values=(dict_key_prop.get_value(artifact) or {}).get(key, [])) - save_prop_func(key_to_save, 'array', serialized) - else: - save_prop_func(key_to_save, db_type, value) - - -def _serialize_dependencies(artifact): - """Returns a dict of serialized dependencies for given artifact""" - dependencies = {} - for relation in artifact.metadata.attributes.dependencies.values(): - serialized_dependency = [] - if isinstance(relation, declarative.ListAttributeDefinition): - for dep in relation.get_value(artifact): - serialized_dependency.append(dep.id) - else: - relation_data = relation.get_value(artifact) - if relation_data: - serialized_dependency.append(relation.get_value(artifact).id) - dependencies[relation.name] = serialized_dependency - return dependencies - - -def _serialize_blobs(artifact): - """Return a dict of serialized blobs for given artifact""" - blobs = {} - for blob in artifact.metadata.attributes.blobs.values(): - serialized_blob = [] - if isinstance(blob, declarative.ListAttributeDefinition): - for b in blob.get_value(artifact) or []: - serialized_blob.append({ - 'size': b.size, - 'locations': b.locations, - 'checksum': b.checksum, - 'item_key': b.item_key - }) - else: - b = blob.get_value(artifact) - # if no value for blob has been set -> continue - if not b: - continue - serialized_blob.append({ - 'size': b.size, - 'locations': b.locations, - 'checksum': b.checksum, - 'item_key': b.item_key - }) - blobs[blob.name] = serialized_blob - return blobs - - -def serialize_for_db(artifact): - result = {} - custom_properties = {} - - def _save_prop(prop_key, prop_type, value): - custom_properties[prop_key] = { - 'type': prop_type, - 'value': value - } - - for prop in artifact.metadata.attributes.properties.values(): - if 
prop.name in COMMON_ARTIFACT_PROPERTIES: - result[prop.name] = prop.get_value(artifact) - continue - if isinstance(prop, declarative.ListAttributeDefinition): - serialized_value = _serialize_list_prop(prop, - prop.get_value(artifact)) - _save_prop(prop.name, 'array', serialized_value) - elif isinstance(prop, declarative.DictAttributeDefinition): - fields_to_set = prop.get_value(artifact) or {} - # if some keys are not present (like in prop == {}), then have to - # set their values to None. - # XXX FIXME prop.properties may be a dict ({'foo': '', 'bar': ''}) - # or String\Integer\whatsoever, limiting the possible dict values. - # In the latter case have no idea how to remove old values during - # serialization process. - if isinstance(prop.properties, dict): - for key in [k for k in prop.properties - if k not in fields_to_set.keys()]: - _serialize_dict_prop(artifact, prop, key, None, _save_prop) - # serialize values of properties present - for key, value in six.iteritems(fields_to_set): - _serialize_dict_prop(artifact, prop, key, value, _save_prop) - elif prop.DB_TYPE is not None: - _save_prop(prop.name, prop.DB_TYPE, prop.get_value(artifact)) - - result['properties'] = custom_properties - result['dependencies'] = _serialize_dependencies(artifact) - result['blobs'] = _serialize_blobs(artifact) - return result - - -def _deserialize_blobs(artifact_type, blobs_from_db, artifact_properties): - """Retrieves blobs from database""" - for blob_name, blob_value in six.iteritems(blobs_from_db): - if not blob_value: - continue - if isinstance(artifact_type.metadata.attributes.blobs.get(blob_name), - declarative.ListAttributeDefinition): - val = [] - for v in blob_value: - b = definitions.Blob(size=v['size'], - locations=v['locations'], - checksum=v['checksum'], - item_key=v['item_key']) - val.append(b) - elif len(blob_value) == 1: - val = definitions.Blob(size=blob_value[0]['size'], - locations=blob_value[0]['locations'], - checksum=blob_value[0]['checksum'], - item_key=blob_value[0]['item_key']) - else: - raise exception.InvalidArtifactPropertyValue( - message=_('Blob %(name)s may not have multiple values'), - name=blob_name) - artifact_properties[blob_name] = val - - -def _deserialize_dependencies(artifact_type, deps_from_db, - artifact_properties, plugins): - """Retrieves dependencies from database""" - for dep_name, dep_value in six.iteritems(deps_from_db): - if not dep_value: - continue - if isinstance( - artifact_type.metadata.attributes.dependencies.get(dep_name), - declarative.ListAttributeDefinition): - val = [] - for v in dep_value: - val.append(deserialize_from_db(v, plugins)) - elif len(dep_value) == 1: - val = deserialize_from_db(dep_value[0], plugins) - else: - raise exception.InvalidArtifactPropertyValue( - message=_('Relation %(name)s may not have multiple values'), - name=dep_name) - artifact_properties[dep_name] = val - - -def deserialize_from_db(db_dict, plugins): - artifact_properties = {} - type_name = None - type_version = None - - for prop_name in COMMON_ARTIFACT_PROPERTIES: - prop_value = db_dict.pop(prop_name, None) - if prop_name == 'type_name': - type_name = prop_value - elif prop_name == 'type_version': - type_version = prop_value - else: - artifact_properties[prop_name] = prop_value - - try: - artifact_type = plugins.get_class_by_typename(type_name, type_version) - except exception.ArtifactPluginNotFound: - raise exception.UnknownArtifactType(name=type_name, - version=type_version) - - type_specific_properties = db_dict.pop('properties', {}) - for prop_name, prop_value in 
six.iteritems(type_specific_properties): - prop_type = prop_value.get('type') - prop_value = prop_value.get('value') - if prop_value is None: - continue - if '.' in prop_name: # dict-based property - name, key = prop_name.split('.', 1) - artifact_properties.setdefault(name, {}) - if prop_type == 'array': - artifact_properties[name][key] = [item.get('value') for item in - prop_value] - else: - artifact_properties[name][key] = prop_value - elif prop_type == 'array': # list-based property - artifact_properties[prop_name] = [item.get('value') for item in - prop_value] - else: - artifact_properties[prop_name] = prop_value - - blobs = db_dict.pop('blobs', {}) - _deserialize_blobs(artifact_type, blobs, artifact_properties) - - dependencies = db_dict.pop('dependencies', {}) - _deserialize_dependencies(artifact_type, dependencies, - artifact_properties, plugins) - - return artifact_type(**artifact_properties) - - -def _process_blobs_for_client(artifact, result): - """Processes artifact's blobs: adds download links and pretty-printed data. - - The result is stored in 'result' dict. - """ - def build_uri(blob_attr, position=None): - """A helper func to build download uri""" - template = "/artifacts/%(type)s/v%(version)s/%(id)s/%(prop)s/download" - format_dict = { - "type": artifact.metadata.endpoint, - "version": artifact.type_version, - "id": artifact.id, - "prop": blob_attr.name - } - if position is not None: - template = ("/artifacts/%(type)s/v%(version)s/" - "%(id)s/%(prop)s/%(position)s/download") - format_dict["position"] = position - - return template % format_dict - - for blob_attr in artifact.metadata.attributes.blobs.values(): - value = blob_attr.get_value(artifact) - if value is None: - result[blob_attr.name] = None - elif isinstance(value, collections.Iterable): - res_list = [] - for pos, blob in enumerate(value): - blob_dict = blob.to_dict() - blob_dict["download_link"] = build_uri(blob_attr, pos) - res_list.append(blob_dict) - result[blob_attr.name] = res_list - else: - result[blob_attr.name] = value.to_dict() - result[blob_attr.name]["download_link"] = build_uri(blob_attr) - - -def serialize_for_client(artifact, show_level=ga.Showlevel.NONE): - # use serialize_for_db and modify some fields - # (like properties, show only value, not type) - result = {} - - for prop in artifact.metadata.attributes.properties.values(): - result[prop.name] = prop.get_value(artifact) - - if show_level > ga.Showlevel.NONE: - for dep in artifact.metadata.attributes.dependencies.values(): - inner_show_level = (ga.Showlevel.DIRECT - if show_level == ga.Showlevel.DIRECT - else ga.Showlevel.NONE) - value = dep.get_value(artifact) - if value is None: - result[dep.name] = None - elif isinstance(value, list): - result[dep.name] = [serialize_for_client(v, inner_show_level) - for v in value] - else: - result[dep.name] = serialize_for_client(value, - inner_show_level) - _process_blobs_for_client(artifact, result) - return result diff --git a/glance/common/semver_db.py b/glance/common/semver_db.py deleted file mode 100644 index 6d670777fc..0000000000 --- a/glance/common/semver_db.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import operator - -import semantic_version -from sqlalchemy.orm.properties import CompositeProperty -from sqlalchemy import sql - -from glance.common import exception -from glance.i18n import _ - -MAX_COMPONENT_LENGTH = pow(2, 16) - 1 -MAX_NUMERIC_PRERELEASE_LENGTH = 6 - - -class DBVersion(object): - def __init__(self, components_long, prerelease, build): - """ - Creates a DBVersion object out of 3 component fields. This initializer - is supposed to be called from SQLAlchemy if 3 database columns are - mapped to this composite field. - - :param components_long: a 64-bit long value, containing numeric - components of the version - :param prerelease: a prerelease label of the version, optionally - preformatted with leading zeroes in numeric-only parts of the label - :param build: a build label of the version - """ - version_string = '%s.%s.%s' % _long_to_components(components_long) - if prerelease: - version_string += '-' + _strip_leading_zeroes_from_prerelease( - prerelease) - - if build: - version_string += '+' + build - self.version = semantic_version.Version(version_string) - - def __repr__(self): - return str(self.version) - - def __eq__(self, other): - return (isinstance(other, DBVersion) and - other.version == self.version) - - def __ne__(self, other): - return (not isinstance(other, DBVersion) - or self.version != other.version) - - def __composite_values__(self): - long_version = _version_to_long(self.version) - prerelease = _add_leading_zeroes_to_prerelease(self.version.prerelease) - build = '.'.join(self.version.build) if self.version.build else None - return long_version, prerelease, build - - -def parse(version_string): - version = semantic_version.Version.coerce(version_string) - return DBVersion(_version_to_long(version), - '.'.join(version.prerelease), - '.'.join(version.build)) - - -def _check_limit(value): - if value > MAX_COMPONENT_LENGTH: - reason = _("Version component is too " - "large (%d max)") % MAX_COMPONENT_LENGTH - raise exception.InvalidVersion(reason=reason) - - -def _version_to_long(version): - """ - Converts the numeric part of the semver version into the 64-bit long value - using the following logic: - - * major version is stored in first 16 bits of the value - * minor version is stored in next 16 bits - * patch version is stored in following 16 bits - * next 2 bits are used to store the flag: if the version has pre-release - label then these bits are 00, otherwise they are 10 (the flag value 2 - used below). The other values of the flag (01 and 11) are reserved - for future usage. - * last 14 bits of the value are reserved for future usage - - The numeric components of version are checked so their value does not - exceed 16 bits.
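- For example, version 1.2.3 with no prerelease label is stored as - (1 << 48) | (2 << 32) | (3 << 16) | (2 << 14).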
- - :param version: a semantic_version.Version object - """ - _check_limit(version.major) - _check_limit(version.minor) - _check_limit(version.patch) - major = version.major << 48 - minor = version.minor << 32 - patch = version.patch << 16 - flag = 0 if version.prerelease else 2 - flag <<= 14 - return major | minor | patch | flag - - -def _long_to_components(value): - major = value >> 48 - minor = (value - (major << 48)) >> 32 - patch = (value - (major << 48) - (minor << 32)) >> 16 - return str(major), str(minor), str(patch) - - -def _add_leading_zeroes_to_prerelease(label_tuple): - if label_tuple is None: - return None - res = [] - for component in label_tuple: - if component.isdigit(): - if len(component) > MAX_NUMERIC_PRERELEASE_LENGTH: - reason = _("Prerelease numeric component is too large " - "(%d characters " - "max)") % MAX_NUMERIC_PRERELEASE_LENGTH - raise exception.InvalidVersion(reason=reason) - res.append(component.rjust(MAX_NUMERIC_PRERELEASE_LENGTH, '0')) - else: - res.append(component) - return '.'.join(res) - - -def _strip_leading_zeroes_from_prerelease(string_value): - res = [] - for component in string_value.split('.'): - if component.isdigit(): - val = component.lstrip('0') - if len(val) == 0: # Corner case: when the component is just '0' - val = '0' # it will be stripped completely, so restore it - res.append(val) - else: - res.append(component) - return '.'.join(res) - -strict_op_map = { - operator.ge: operator.gt, - operator.le: operator.lt -} - - -class VersionComparator(CompositeProperty.Comparator): - def _get_comparison(self, values, op): - columns = self.__clause_element__().clauses - if op in strict_op_map: - stricter_op = strict_op_map[op] - else: - stricter_op = op - - return sql.or_(stricter_op(columns[0], values[0]), - sql.and_(columns[0] == values[0], - op(columns[1], values[1]))) - - def __gt__(self, other): - return self._get_comparison(other.__composite_values__(), operator.gt) - - def __ge__(self, other): - return self._get_comparison(other.__composite_values__(), operator.ge) - - def __lt__(self, other): - return self._get_comparison(other.__composite_values__(), operator.lt) - - def __le__(self, other): - return self._get_comparison(other.__composite_values__(), operator.le) diff --git a/glance/contrib/__init__.py b/glance/contrib/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/__init__.py b/glance/contrib/plugins/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/artifacts_sample/__init__.py b/glance/contrib/plugins/artifacts_sample/__init__.py deleted file mode 100644 index 7406b2d20e..0000000000 --- a/glance/contrib/plugins/artifacts_sample/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from glance.contrib.plugins.artifacts_sample.v1 import artifact as art1 -from glance.contrib.plugins.artifacts_sample.v2 import artifact as art2 - - -MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact] diff --git a/glance/contrib/plugins/artifacts_sample/base.py b/glance/contrib/plugins/artifacts_sample/base.py deleted file mode 100644 index 9857c6100d..0000000000 --- a/glance/contrib/plugins/artifacts_sample/base.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.common.glare import definitions - - -class BaseArtifact(definitions.ArtifactType): - __type_version__ = "1.0" - prop1 = definitions.String() - prop2 = definitions.Integer() - int_list = definitions.Array(item_type=definitions.Integer(max_value=10, - min_value=1)) - depends_on = definitions.ArtifactReference(type_name='MyArtifact') - references = definitions.ArtifactReferenceList() - - image_file = definitions.BinaryObject() - screenshots = definitions.BinaryObjectList() diff --git a/glance/contrib/plugins/artifacts_sample/setup.cfg b/glance/contrib/plugins/artifacts_sample/setup.cfg deleted file mode 100644 index 7d5234ae7e..0000000000 --- a/glance/contrib/plugins/artifacts_sample/setup.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[metadata] -name = artifact -version = 0.0.1 -description = A sample plugin for artifact loading -author = Inessa Vasilevskaya -author-email = ivasilevskaya@mirantis.com -classifier = - Development Status :: 3 - Alpha - License :: OSI Approved :: Apache Software License - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.2 - Programming Language :: Python :: 3.3 - Intended Audience :: Developers - Environment :: Console - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[entry_points] -glance.artifacts.types = - MyArtifact = glance.contrib.plugins.artifacts_sample:MY_ARTIFACT diff --git a/glance/contrib/plugins/artifacts_sample/setup.py b/glance/contrib/plugins/artifacts_sample/setup.py deleted file mode 100644 index 2a3ea51e79..0000000000 --- a/glance/contrib/plugins/artifacts_sample/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import setuptools - -# all other params will be taken from setup.cfg -setuptools.setup(packages=setuptools.find_packages(), - setup_requires=['pbr'], pbr=True) diff --git a/glance/contrib/plugins/artifacts_sample/v1/__init__.py b/glance/contrib/plugins/artifacts_sample/v1/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/artifacts_sample/v2/__init__.py b/glance/contrib/plugins/artifacts_sample/v2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/image_artifact/__init__.py b/glance/contrib/plugins/image_artifact/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/image_artifact/requirements.txt b/glance/contrib/plugins/image_artifact/requirements.txt deleted file mode 100644 index 5cee777d45..0000000000 --- a/glance/contrib/plugins/image_artifact/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -python-glanceclient diff --git a/glance/contrib/plugins/image_artifact/setup.cfg b/glance/contrib/plugins/image_artifact/setup.cfg deleted file mode 100644 index 38253c792d..0000000000 --- a/glance/contrib/plugins/image_artifact/setup.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[metadata] -name = image_artifact_plugin -version = 2.0 -description = An artifact plugin for Imaging functionality -author = Alexander Tivelkov -author-email = ativelkov@mirantis.com -classifier = - Development Status :: 3 - Alpha - License :: OSI Approved :: Apache Software License - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.2 - Programming Language :: Python :: 3.3 - Intended Audience :: Developers - Environment :: Console - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[entry_points] -glance.artifacts.types = - Image = glance.contrib.plugins.image_artifact.version_selector:versions diff --git a/glance/contrib/plugins/image_artifact/v1/__init__.py b/glance/contrib/plugins/image_artifact/v1/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/image_artifact/v1/image.py b/glance/contrib/plugins/image_artifact/v1/image.py deleted file mode 100644 index d842416ff7..0000000000 --- a/glance/contrib/plugins/image_artifact/v1/image.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.common.glare import definitions - - -class ImageAsAnArtifact(definitions.ArtifactType): - __type_name__ = 'Image' - __endpoint__ = 'images' - - file = definitions.BinaryObject(required=True) - disk_format = definitions.String(allowed_values=['ami', 'ari', 'aki', - 'vhd', 'vhdx', 'vmdk', - 'raw', 'qcow2', 'vdi', - 'iso'], - required=True, - mutable=False) - container_format = definitions.String(allowed_values=['ami', 'ari', - 'aki', 'bare', - 'ovf', 'ova', - 'docker'], - required=True, - mutable=False) - min_disk = definitions.Integer(min_value=0, default=0) - min_ram = definitions.Integer(min_value=0, default=0) - - virtual_size = definitions.Integer(min_value=0) diff --git a/glance/contrib/plugins/image_artifact/v1_1/__init__.py b/glance/contrib/plugins/image_artifact/v1_1/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/image_artifact/v1_1/image.py b/glance/contrib/plugins/image_artifact/v1_1/image.py deleted file mode 100644 index ea76ce6df0..0000000000 --- a/glance/contrib/plugins/image_artifact/v1_1/image.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from glance.common.glare import definitions -import glance.contrib.plugins.image_artifact.v1.image as v1 - - -class ImageAsAnArtifact(v1.ImageAsAnArtifact): - __type_version__ = '1.1' - - icons = definitions.BinaryObjectList() - - similar_images = (definitions. - ArtifactReferenceList(references=definitions. - ArtifactReference('Image'))) diff --git a/glance/contrib/plugins/image_artifact/v2/__init__.py b/glance/contrib/plugins/image_artifact/v2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/contrib/plugins/image_artifact/v2/image.py b/glance/contrib/plugins/image_artifact/v2/image.py deleted file mode 100644 index 52831fde21..0000000000 --- a/glance/contrib/plugins/image_artifact/v2/image.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.common import exception -from glance.common.glare import definitions -import glance.contrib.plugins.image_artifact.v1_1.image as v1_1 - -# Since glanceclient is not in the test-requirements.txt and the class below, -# ImageAsAnArtifact, is pending removal, a try/except is added to prevent -# an ImportError when module docs are generated -try: - import glanceclient -except ImportError: - glanceclient = None - - -from glance.i18n import _ - - -class ImageAsAnArtifact(v1_1.ImageAsAnArtifact): - __type_version__ = '2.0' - - file = definitions.BinaryObject(required=False) - legacy_image_id = definitions.String(required=False, mutable=False, - pattern=R'[0-9a-f]{8}-[0-9a-f]{4}' - R'-4[0-9a-f]{3}-[89ab]' - R'[0-9a-f]{3}-[0-9a-f]{12}') - - def __pre_publish__(self, context, *args, **kwargs): - super(ImageAsAnArtifact, self).__pre_publish__(*args, **kwargs) - if self.file is None and self.legacy_image_id is None: - raise exception.InvalidArtifactPropertyValue( - message=_("Either a file or a legacy_image_id has to be " - "specified") - ) - if self.file is not None and self.legacy_image_id is not None: - raise exception.InvalidArtifactPropertyValue( - message=_("Both file and legacy_image_id may not be " - "specified at the same time")) - - if self.legacy_image_id: - glance_endpoint = next(service['endpoints'][0]['publicURL'] - for service in context.service_catalog - if service['name'] == 'glance') - # Ensure glanceclient is imported correctly since we are catching - # the ImportError on initialization - if glanceclient is None: - raise ImportError(_("Glance client not installed")) - - try: - client = glanceclient.Client(version=2, - endpoint=glance_endpoint, - token=context.auth_token) - legacy_image = client.images.get(self.legacy_image_id) - except Exception: - raise exception.InvalidArtifactPropertyValue( - message=_('Unable to get legacy image') - ) - if legacy_image is not None: - self.file = definitions.Blob(size=legacy_image.size, - locations=[ - { - "status": "active", - "value": - legacy_image.direct_url - }], - checksum=legacy_image.checksum, - item_key=legacy_image.id) - else: - raise exception.InvalidArtifactPropertyValue( - message=_("Legacy image was not found") - ) diff --git a/glance/contrib/plugins/image_artifact/version_selector.py b/glance/contrib/plugins/image_artifact/version_selector.py deleted file mode 100644 index 0b1fd7e215..0000000000 --- a/glance/contrib/plugins/image_artifact/version_selector.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
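- # The "versions" list below is the object that the Image entry point in - # setup.cfg resolves to, so a single entry point registers all three type - # versions at once; ArtifactsPluginLoader keeps them sorted by - # type_version and returns the newest when no explicit version is asked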
- -from glance.contrib.plugins.image_artifact.v1 import image as v1 -from glance.contrib.plugins.image_artifact.v1_1 import image as v1_1 -from glance.contrib.plugins.image_artifact.v2 import image as v2 - -versions = [v1.ImageAsAnArtifact, v1_1.ImageAsAnArtifact, v2.ImageAsAnArtifact] diff --git a/glance/db/__init__.py b/glance/db/__init__.py index 914fe2a3b0..e2120dd34c 100644 --- a/glance/db/__init__.py +++ b/glance/db/__init__.py @@ -24,11 +24,9 @@ from wsme.rest import json from glance.api.v2.model.metadef_property_type import PropertyType from glance.common import crypt from glance.common import exception -from glance.common.glare import serialization from glance.common import location_strategy import glance.domain import glance.domain.proxy -from glance import glare as ga from glance.i18n import _ CONF = cfg.CONF @@ -59,99 +57,6 @@ IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size', 'virtual_size', 'protected']) -class ArtifactRepo(object): - fields = ['id', 'name', 'version', 'type_name', 'type_version', - 'visibility', 'state', 'owner', 'scope', 'created_at', - 'updated_at', 'tags', 'dependencies', 'blobs', 'properties'] - - def __init__(self, context, db_api, plugins): - self.context = context - self.db_api = db_api - self.plugins = plugins - - def get(self, artifact_id, type_name=None, type_version=None, - show_level=None, include_deleted=False): - if show_level is None: - show_level = ga.Showlevel.BASIC - try: - db_api_artifact = self.db_api.artifact_get(self.context, - artifact_id, - type_name, - type_version, - show_level) - if db_api_artifact["state"] == 'deleted' and not include_deleted: - raise exception.ArtifactNotFound(artifact_id) - except (exception.ArtifactNotFound, exception.ArtifactForbidden): - msg = _("No artifact found with ID %s") % artifact_id - raise exception.ArtifactNotFound(msg) - return serialization.deserialize_from_db(db_api_artifact, self.plugins) - - def list(self, marker=None, limit=None, - sort_keys=None, sort_dirs=None, filters=None, - show_level=None): - sort_keys = ['created_at'] if sort_keys is None else sort_keys - sort_dirs = ['desc'] if sort_dirs is None else sort_dirs - if show_level is None: - show_level = ga.Showlevel.NONE - db_api_artifacts = self.db_api.artifact_get_all( - self.context, filters=filters, marker=marker, limit=limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, show_level=show_level) - artifacts = [] - for db_api_artifact in db_api_artifacts: - artifact = serialization.deserialize_from_db(db_api_artifact, - self.plugins) - artifacts.append(artifact) - return artifacts - - def _format_artifact_from_db(self, db_artifact): - kwargs = {k: db_artifact.get(k, None) for k in self.fields} - return glance.domain.Artifact(**kwargs) - - def add(self, artifact): - artifact_values = serialization.serialize_for_db(artifact) - artifact_values['updated_at'] = artifact.updated_at - self.db_api.artifact_create(self.context, artifact_values, - artifact.type_name, artifact.type_version) - - def save(self, artifact): - artifact_values = serialization.serialize_for_db(artifact) - try: - db_api_artifact = self.db_api.artifact_update( - self.context, - artifact_values, - artifact.id, - artifact.type_name, - artifact.type_version) - except (exception.ArtifactNotFound, - exception.ArtifactForbidden): - msg = _("No artifact found with ID %s") % artifact.id - raise exception.ArtifactNotFound(msg) - return serialization.deserialize_from_db(db_api_artifact, self.plugins) - - def remove(self, artifact): - try: - 
self.db_api.artifact_delete(self.context, artifact.id, - artifact.type_name, - artifact.type_version) - except (exception.NotFound, exception.Forbidden): - msg = _("No artifact found with ID %s") % artifact.id - raise exception.ArtifactNotFound(msg) - - def publish(self, artifact): - try: - artifact_changed = ( - self.db_api.artifact_publish( - self.context, - artifact.id, - artifact.type_name, - artifact.type_version)) - return serialization.deserialize_from_db(artifact_changed, - self.plugins) - except (exception.NotFound, exception.Forbidden): - msg = _("No artifact found with ID %s") % artifact.id - raise exception.ArtifactNotFound(msg) - - class ImageRepo(object): def __init__(self, context, db_api): diff --git a/glance/db/migration.py b/glance/db/migration.py index 17a9b84954..638894b22c 100644 --- a/glance/db/migration.py +++ b/glance/db/migration.py @@ -49,7 +49,7 @@ EXPAND_BRANCH = 'expand' CONTRACT_BRANCH = 'contract' CURRENT_RELEASE = 'pike' ALEMBIC_INIT_VERSION = 'liberty' -LATEST_REVISION = 'ocata01' +LATEST_REVISION = 'pike01' INIT_VERSION = 0 MIGRATE_REPO_PATH = os.path.join( diff --git a/glance/db/registry/api.py b/glance/db/registry/api.py index a71eeb7c96..a2609b365d 100644 --- a/glance/db/registry/api.py +++ b/glance/db/registry/api.py @@ -32,7 +32,6 @@ database back-end. import functools from glance.db import utils as db_utils -from glance import glare from glance.registry.client.v2 import api @@ -545,53 +544,3 @@ def metadef_tag_delete_namespace_content( @_get_client def metadef_tag_count(client, namespace_name, session=None): return client.metadef_tag_count(namespace_name=namespace_name) - - -@_get_client -def artifact_create(client, values, - type_name, type_version=None, session=None): - return client.artifact_create(values=values, - type_name=type_name, - type_version=type_version) - - -@_get_client -def artifact_update(client, values, artifact_id, - type_name, type_version=None, session=None): - return client.artifact_update(values=values, artifact_id=artifact_id, - type_name=type_name, - type_version=type_version) - - -@_get_client -def artifact_delete(client, artifact_id, - type_name, type_version=None, session=None): - return client.artifact_delete(artifact_id=artifact_id, - type_name=type_name, - type_version=type_version) - - -@_get_client -def artifact_get(client, artifact_id, - type_name, type_version=None, session=None): - return client.artifact_get(artifact_id=artifact_id, - type_name=type_name, - type_version=type_version) - - -@_get_client -def artifact_get_all(client, marker=None, limit=None, sort_key=None, - sort_dir=None, filters=None, - show_level=glare.Showlevel.NONE, session=None): - if filters is None: - filters = {} - return client.artifact_create(marker, limit, sort_key, - sort_dir, filters, show_level) - - -@_get_client -def artifact_publish(client, artifact_id, - type_name, type_version=None, session=None): - return client.artifact_publish(artifact_id=artifact_id, - type_name=type_name, - type_version=type_version) diff --git a/glance/db/simple/api.py b/glance/db/simple/api.py index 90774193ae..000ec30e24 100644 --- a/glance/db/simple/api.py +++ b/glance/db/simple/api.py @@ -43,12 +43,6 @@ DATA = { 'locations': [], 'tasks': {}, 'task_info': {}, - 'artifacts': {}, - 'artifact_properties': {}, - 'artifact_tags': {}, - 'artifact_dependencies': {}, - 'artifact_blobs': {}, - 'artifact_blob_locations': {} } INDEX = 0 @@ -85,7 +79,6 @@ def reset(): 'locations': [], 'tasks': {}, 'task_info': {}, - 'artifacts': {} } @@ -1939,96 +1932,6 @@ def 
metadef_tag_count(context, namespace_name): return count -def _artifact_format(artifact_id, **values): - dt = timeutils.utcnow() - artifact = { - 'id': artifact_id, - 'type_name': None, - 'type_version_prefix': None, - 'type_version_suffix': None, - 'type_version_meta': None, - 'version_prefix': None, - 'version_suffix': None, - 'version_meta': None, - 'description': None, - 'visibility': None, - 'state': None, - 'owner': None, - 'scope': None, - 'tags': [], - 'properties': {}, - 'blobs': [], - 'created_at': dt, - 'updated_at': dt, - 'deleted_at': None, - 'deleted': False, - } - - artifact.update(values) - return artifact - - -@log_call -def artifact_create(context, values, type_name, type_version): - global DATA - artifact_id = values.get('id', str(uuid.uuid4())) - - if artifact_id in DATA['artifacts']: - raise exception.Duplicate() - - if 'state' not in values: - raise exception.Invalid('state is a required attribute') - - allowed_keys = set(['id', - 'type_name', - 'type_version', - 'name', - 'version', - 'description', - 'visibility', - 'state', - 'owner', - 'scope']) - - incorrect_keys = set(values.keys()) - allowed_keys - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - artifact = _artifact_format(artifact_id, **values) - DATA['artifacts'][artifact_id] = artifact - - return copy.deepcopy(artifact) - - -def _artifact_get(context, artifact_id, type_name, - type_version=None): - try: - artifact = DATA['artifacts'][artifact_id] - if (artifact['type_name'] != type_name or - (type_version is not None and - artifact['type_version'] != type_version)): - raise KeyError - except KeyError: - LOG.info(_LI('Could not find artifact %s'), artifact_id) - raise exception.NotFound() - - if artifact['deleted_at']: - LOG.info(_LI('Unable to get deleted image')) - raise exception.NotFound() - - return artifact - - -@log_call -def artifact_get(context, artifact_id, - type_name, - type_version=None, session=None): - artifact = _artifact_get(context, artifact_id, type_name, - type_version) - return copy.deepcopy(artifact) - - def _format_association(namespace, resource_type, association_values): association = { 'namespace_id': namespace['id'], diff --git a/glance/contrib/plugins/artifacts_sample/v1/artifact.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py similarity index 63% rename from glance/contrib/plugins/artifacts_sample/v1/artifact.py rename to glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py index f224edd305..b1f02ca474 100644 --- a/glance/contrib/plugins/artifacts_sample/v1/artifact.py +++ b/glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py @@ -1,6 +1,3 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,9 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. - -from glance.contrib.plugins.artifacts_sample import base +# NOTE(rosmaita): This file implements the migration interface, but doesn't +# migrate any data. The pike01 migration is contract-only. 
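For contrast, a data migration that actually had rows to move would fill in the same two hooks that follow. A hypothetical sketch only (the table and column are invented for illustration, loosely in the style of the earlier ocata visibility backfill; this is not part of the patch):

    def has_migrations(engine):
        """Returns true if at least one data row can be migrated."""
        res = engine.execute(
            "SELECT 1 FROM images WHERE visibility IS NULL LIMIT 1")
        return res.fetchone() is not None


    def migrate(engine):
        """Return the number of rows migrated."""
        res = engine.execute(
            "UPDATE images SET visibility = 'shared'"
            " WHERE visibility IS NULL")
        return res.rowcount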
-class MyArtifact(base.BaseArtifact): - __type_version__ = "1.0.1" +def has_migrations(engine): + """Returns true if at least one data row can be migrated.""" + + return False + + +def migrate(engine): + """Return the number of rows migrated.""" + + return 0 diff --git a/glance/db/sqlalchemy/alembic_migrations/env.py b/glance/db/sqlalchemy/alembic_migrations/env.py index 0de0d82f05..12d2945513 100644 --- a/glance/db/sqlalchemy/alembic_migrations/env.py +++ b/glance/db/sqlalchemy/alembic_migrations/env.py @@ -20,7 +20,6 @@ from alembic import context from sqlalchemy import engine_from_config, pool from glance.db.sqlalchemy import models -from glance.db.sqlalchemy import models_glare from glance.db.sqlalchemy import models_metadef # this is the Alembic Config object, which provides @@ -39,8 +38,6 @@ log_config.fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support target_metadata = models.BASE.metadata -for table in models_glare.BASE.metadata.sorted_tables: - target_metadata._add_table(table.name, table.schema, table) for table in models_metadef.BASE_DICT.metadata.sorted_tables: target_metadata._add_table(table.name, table.schema, table) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py b/glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py new file mode 100644 index 0000000000..b7886c47dc --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""drop glare artifacts tables + +Revision ID: pike01 +Revises: ocata01 +Create Date: 2017-02-08 20:32:51.200867 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = 'pike01' +down_revision = 'ocata01' +branch_labels = None +depends_on = None + + +def upgrade(): + # create list of artifact tables in reverse order of their creation + table_names = [] + table_names.append('artifact_blob_locations') + table_names.append('artifact_properties') + table_names.append('artifact_blobs') + table_names.append('artifact_dependencies') + table_names.append('artifact_tags') + table_names.append('artifacts') + + for table_name in table_names: + op.drop_table(table_name=table_name) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py b/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py new file mode 100644 index 0000000000..11e4eb4144 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""drop glare artifacts tables + +Revision ID: pike_contract01 +Revises: ocata_contract01 +Create Date: 2017-02-09 20:32:51.222867 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = 'pike_contract01' +down_revision = 'ocata_contract01' +branch_labels = None +depends_on = 'pike_expand01' + + +def upgrade(): + # create list of artifact tables in reverse order of their creation + table_names = [] + table_names.append('artifact_blob_locations') + table_names.append('artifact_properties') + table_names.append('artifact_blobs') + table_names.append('artifact_dependencies') + table_names.append('artifact_tags') + table_names.append('artifacts') + + for table_name in table_names: + op.drop_table(table_name=table_name) diff --git a/glance/contrib/plugins/artifacts_sample/v2/artifact.py b/glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py similarity index 64% rename from glance/contrib/plugins/artifacts_sample/v2/artifact.py rename to glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py index 6db376d745..24cc670807 100644 --- a/glance/contrib/plugins/artifacts_sample/v2/artifact.py +++ b/glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py @@ -1,6 +1,3 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,11 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. +"""empty expand for symmetry with pike_contract01 -from glance.common.glare import definitions -from glance.contrib.plugins.artifacts_sample import base +Revision ID: pike_expand01 +Revises: ocata_expand01 +Create Date: 2017-02-09 19:55:16.657499 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'pike_expand01' +down_revision = 'ocata_expand01' +branch_labels = None +depends_on = None -class MyArtifact(base.BaseArtifact): - __type_version__ = "2.0" - depends_on = definitions.ArtifactReference(type_name="MyArtifact") +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/api.py b/glance/db/sqlalchemy/api.py index 96bed7256b..24ed42a670 100644 --- a/glance/db/sqlalchemy/api.py +++ b/glance/db/sqlalchemy/api.py @@ -43,7 +43,6 @@ import sqlalchemy.sql as sa_sql from glance.common import exception from glance.common import timeutils from glance.common import utils -from glance.db.sqlalchemy import glare from glance.db.sqlalchemy.metadef_api import (resource_type as metadef_resource_type_api) from glance.db.sqlalchemy.metadef_api import (resource_type_association @@ -54,7 +53,6 @@ from glance.db.sqlalchemy.metadef_api import property as metadef_property_api from glance.db.sqlalchemy.metadef_api import tag as metadef_tag_api from glance.db.sqlalchemy import models from glance.db import utils as db_utils -from glance import glare as ga from glance.i18n import _, _LW, _LI sa_logger = None @@ -1873,58 +1871,3 @@ def metadef_tag_count(context, namespace_name, session=None): """Get count of tags for a namespace, raise if ns doesn't exist.""" session = session or get_session() return metadef_tag_api.count(context, namespace_name, session) - - -def artifact_create(context, values, type_name, - type_version=None, session=None): - session = session or get_session() - artifact = glare.create(context, values, session, type_name, - type_version) - return artifact - - -def artifact_delete(context, artifact_id, type_name, - type_version=None, session=None): - session = session or get_session() - artifact = glare.delete(context, artifact_id, session, type_name, - type_version) - return artifact - - -def artifact_update(context, values, artifact_id, type_name, - type_version=None, session=None): - session = session or get_session() - artifact = glare.update(context, values, artifact_id, session, - type_name, type_version) - return artifact - - -def artifact_get(context, artifact_id, - type_name=None, - type_version=None, - show_level=ga.Showlevel.BASIC, - session=None): - session = session or get_session() - return glare.get(context, artifact_id, session, type_name, - type_version, show_level) - - -def artifact_publish(context, - artifact_id, - type_name, - type_version=None, - session=None): - session = session or get_session() - return glare.publish(context, - artifact_id, - session, - type_name, - type_version) - - -def artifact_get_all(context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, - show_level=ga.Showlevel.NONE, session=None): - session = session or get_session() - return glare.get_all(context, session, marker, limit, sort_keys, - sort_dirs, filters, show_level) diff --git a/glance/db/sqlalchemy/glare.py b/glance/db/sqlalchemy/glare.py deleted file mode 100644 index 2bda0cc59a..0000000000 --- a/glance/db/sqlalchemy/glare.py +++ /dev/null @@ -1,784 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import operator -import uuid - -from enum import Enum -from oslo_db import exception as db_exc -import sqlalchemy -from sqlalchemy import and_ -from sqlalchemy import case -from sqlalchemy import or_ -import sqlalchemy.orm as orm -from sqlalchemy.orm import joinedload - -from glance.common import exception -from glance.common import semver_db -from glance.common import timeutils -from glance.db.sqlalchemy import models_glare as models -import glance.glare as ga -from glance.i18n import _LE, _LW -from oslo_log import log as os_logging - -LOG = os_logging.getLogger(__name__) - - -class Visibility(Enum): - PRIVATE = 'private' - PUBLIC = 'public' - SHARED = 'shared' - - -class State(Enum): - CREATING = 'creating' - ACTIVE = 'active' - DEACTIVATED = 'deactivated' - DELETED = 'deleted' - - -TRANSITIONS = { - State.CREATING: [State.ACTIVE, State.DELETED], - State.ACTIVE: [State.DEACTIVATED, State.DELETED], - State.DEACTIVATED: [State.ACTIVE, State.DELETED], - State.DELETED: [] -} - - -def create(context, values, session, type_name, type_version=None): - return _out(_create_or_update(context, values, None, session, - type_name, type_version)) - - -def update(context, values, artifact_id, session, - type_name, type_version=None): - return _out(_create_or_update(context, values, artifact_id, session, - type_name, type_version)) - - -def delete(context, artifact_id, session, type_name, type_version=None): - values = {'state': 'deleted'} - return _out(_create_or_update(context, values, artifact_id, session, - type_name, type_version)) - - -def _create_or_update(context, values, artifact_id, session, type_name, - type_version=None): - values = copy.deepcopy(values) - with session.begin(): - _set_version_fields(values) - _validate_values(values) - _drop_protected_attrs(models.Artifact, values) - if artifact_id: - # update existing artifact - state = values.get('state') - show_level = ga.Showlevel.BASIC - if state is not None: - if state == 'active': - show_level = ga.Showlevel.DIRECT - values['published_at'] = timeutils.utcnow() - if state == 'deleted': - values['deleted_at'] = timeutils.utcnow() - - artifact = _get(context, artifact_id, session, type_name, - type_version, show_level=show_level) - _validate_transition(artifact.state, - values.get('state') or artifact.state) - else: - # create new artifact - artifact = models.Artifact() - if 'id' not in values: - artifact.id = str(uuid.uuid4()) - else: - artifact.id = values['id'] - - if 'tags' in values: - tags = values.pop('tags') - artifact.tags = _do_tags(artifact, tags) - - if 'properties' in values: - properties = values.pop('properties', {}) - artifact.properties = _do_properties(artifact, properties) - - if 'blobs' in values: - blobs = values.pop('blobs') - artifact.blobs = _do_blobs(artifact, blobs) - - if 'dependencies' in values: - dependencies = values.pop('dependencies') - _do_dependencies(artifact, dependencies, session) - - if values.get('state', None) == 'publish': - artifact.dependencies.extend( - _do_transitive_dependencies(artifact, session)) - - artifact.update(values) - try: - artifact.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.warn(_LW("Artifact with the specified type, name and version " - "already exists")) - raise exception.ArtifactDuplicateNameTypeVersion() - - return artifact - - -def get(context, artifact_id, session, type_name=None, type_version=None, - show_level=ga.Showlevel.BASIC): - 
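- # fetch the model at the requested show level, then serialize it to a - # dict with _out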
artifact = _get(context, artifact_id, session, type_name, type_version, - show_level) - return _out(artifact, show_level) - - -def publish(context, artifact_id, session, type_name, - type_version=None): - """ - Transitive dependencies are not created when an artifact is first - saved, so creating them has to be triggered manually by calling this - function. It creates the transitive dependencies for the given - artifact_id and saves them in the DB. - :returns: artifact dict with Transitive show level - """ - values = {'state': 'active'} - return _out(_create_or_update(context, values, artifact_id, session, - type_name, type_version)) - - -def _validate_transition(source_state, target_state): - if target_state == source_state: - return - try: - source_state = State(source_state) - target_state = State(target_state) - except ValueError: - raise exception.InvalidArtifactStateTransition(source=source_state, - target=target_state) - if (source_state not in TRANSITIONS or - target_state not in TRANSITIONS[source_state]): - raise exception.InvalidArtifactStateTransition(source=source_state, - target=target_state) - - -def _out(artifact, show_level=ga.Showlevel.BASIC, show_text_properties=True): - """ - Transforms a SQLAlchemy artifact object into a dict according to the - show level. - - :param artifact: SQLAlchemy artifact model to serialize - :param show_level: constant from Showlevel class - :param show_text_properties: for performance optimization it's possible - to disable loading of massive text properties - :returns: generated dict - """ - res = artifact.to_dict(show_level=show_level, - show_text_properties=show_text_properties) - - if show_level >= ga.Showlevel.DIRECT: - dependencies = artifact.dependencies - dependencies.sort(key=lambda elem: (elem.artifact_origin, - elem.name, elem.position)) - res['dependencies'] = {} - if show_level == ga.Showlevel.DIRECT: - new_show_level = ga.Showlevel.BASIC - else: - new_show_level = ga.Showlevel.TRANSITIVE - for dep in dependencies: - if dep.artifact_origin == artifact.id: - # make array - for p in res['dependencies'].keys(): - if p == dep.name: - # add value to array - res['dependencies'][p].append( - _out(dep.dest, new_show_level)) - break - else: - # create new array - deparr = [_out(dep.dest, new_show_level)] - res['dependencies'][dep.name] = deparr - return res - - -def _get(context, artifact_id, session, type_name=None, type_version=None, - show_level=ga.Showlevel.BASIC): - values = dict(id=artifact_id) - if type_name is not None: - values['type_name'] = type_name - if type_version is not None: - values['type_version'] = type_version - _set_version_fields(values) - try: - if show_level == ga.Showlevel.NONE: - query = ( - session.query(models.Artifact). - options(joinedload(models.Artifact.tags)). - filter_by(**values)) - else: - query = ( - session.query(models.Artifact). - options(joinedload(models.Artifact.properties)). - options(joinedload(models.Artifact.tags)). - options(joinedload(models.Artifact.blobs). - joinedload(models.ArtifactBlob.locations)).
-                filter_by(**values))
-
-        artifact = query.one()
-    except orm.exc.NoResultFound:
-        LOG.warn(_LW("Artifact with id=%s not found") % artifact_id)
-        raise exception.ArtifactNotFound(id=artifact_id)
-    if not _check_visibility(context, artifact):
-        LOG.warn(_LW("Artifact with id=%s is not accessible") % artifact_id)
-        raise exception.ArtifactForbidden(id=artifact_id)
-    return artifact
-
-
-def get_all(context, session, marker=None, limit=None,
-            sort_keys=None, sort_dirs=None, filters=None,
-            show_level=ga.Showlevel.NONE):
-    """List all visible artifacts"""
-
-    filters = filters or {}
-
-    artifacts = _get_all(
-        context, session, filters, marker,
-        limit, sort_keys, sort_dirs, show_level)
-
-    return [_out(ns, show_level, show_text_properties=False)
-            for ns in artifacts]
-
-
-def _get_all(context, session, filters=None, marker=None,
-             limit=None, sort_keys=None, sort_dirs=None,
-             show_level=ga.Showlevel.NONE):
-    """Get all artifacts that match zero or more filters.
-
-    :param filters: dict of filter keys and values.
-    :param marker: artifact id after which to start page
-    :param limit: maximum number of artifacts to return
-    :param sort_keys: artifact attributes by which results should be sorted
-    :param sort_dirs: directions in which results should be sorted (asc, desc)
-    """
-
-    filters = filters or {}
-
-    query = _do_artifacts_query(context, session, show_level)
-    basic_conds, tag_conds, prop_conds = _do_query_filters(filters)
-
-    if basic_conds:
-        for basic_condition in basic_conds:
-            query = query.filter(and_(*basic_condition))
-
-    if tag_conds:
-        for tag_condition in tag_conds:
-            query = query.join(models.ArtifactTag, aliased=True).filter(
-                and_(*tag_condition))
-
-    if prop_conds:
-        for prop_condition in prop_conds:
-            query = query.join(models.ArtifactProperty, aliased=True).filter(
-                and_(*prop_condition))
-
-    marker_artifact = None
-    if marker is not None:
-        marker_artifact = _get(context, marker, session, None, None)
-
-    if sort_keys is None:
-        sort_keys = [('created_at', None), ('id', None)]
-        sort_dirs = ['desc', 'desc']
-    else:
-        for key in [('created_at', None), ('id', None)]:
-            if key not in sort_keys:
-                sort_keys.append(key)
-                sort_dirs.append('desc')
-
-    # Note(mfedosin): Workaround to deal with situation that sqlalchemy cannot
-    # work with composite keys correctly
-    if ('version', None) in sort_keys:
-        i = sort_keys.index(('version', None))
-        version_sort_dir = sort_dirs[i]
-        sort_keys[i:i + 1] = [('version_prefix', None),
-                              ('version_suffix', None),
-                              ('version_meta', None)]
-        sort_dirs[i:i + 1] = [version_sort_dir] * 3
-
-    query = _do_paginate_query(query=query,
-                               limit=limit,
-                               sort_keys=sort_keys,
-                               marker=marker_artifact,
-                               sort_dirs=sort_dirs)
-
-    return query.all()
-
-
-def _do_paginate_query(query, sort_keys=None, sort_dirs=None,
-                       marker=None, limit=None):
-    # Default the sort direction to ascending
-    sort_dir = 'asc'
-
-    # Ensure a per-column sort direction
-    if sort_dirs is None:
-        sort_dirs = [sort_dir] * len(sort_keys)
-
-    assert(len(sort_dirs) == len(sort_keys))  # nosec
-    # nosec: This function runs safely if the assertion fails.
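The marker handling in the remainder of this function implements classic keyset (seek-method) pagination: for the i-th sort key, every earlier key is pinned to the marker row's value and the i-th key is compared strictly. A minimal, self-contained sketch of that shape, with illustrative names that are not part of the removed module:

```python
# Keyset pagination as in _do_paginate_query: one AND-group per sort-key
# position, with the groups OR-ed together (cf. criteria_list below).
def marker_criteria(sort_keys, sort_dirs, marker_values):
    groups = []
    for i, key in enumerate(sort_keys):
        # Earlier keys must equal the marker's values...
        group = [(sort_keys[j], '==', marker_values[j]) for j in range(i)]
        # ...and the current key must compare strictly past the marker.
        op = '<' if sort_dirs[i] == 'desc' else '>'
        group.append((key, op, marker_values[i]))
        groups.append(group)
    return groups


# Resume after the row (created_at=5, id='abc') under (desc, desc) ordering:
print(marker_criteria(['created_at', 'id'], ['desc', 'desc'], [5, 'abc']))
# [[('created_at', '<', 5)], [('created_at', '==', 5), ('id', '<', 'abc')]]
```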
- if len(sort_dirs) < len(sort_keys): - sort_dirs += [sort_dir] * (len(sort_keys) - len(sort_dirs)) - - # Add sorting - for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): - try: - sort_dir_func = { - 'asc': sqlalchemy.asc, - 'desc': sqlalchemy.desc, - }[current_sort_dir] - except KeyError: - raise ValueError(_LE("Unknown sort direction, " - "must be 'desc' or 'asc'")) - - if current_sort_key[1] is None: - # sort by generic property - query = query.order_by(sort_dir_func(getattr( - models.Artifact, - current_sort_key[0]))) - else: - # sort by custom property - prop_type = current_sort_key[1] + "_value" - query = ( - query.join(models.ArtifactProperty). - filter(models.ArtifactProperty.name == current_sort_key[0]). - order_by(sort_dir_func(getattr(models.ArtifactProperty, - prop_type)))) - - default = '' - - # Add pagination - if marker is not None: - marker_values = [] - for sort_key in sort_keys: - v = getattr(marker, sort_key[0]) - if v is None: - v = default - marker_values.append(v) - - # Build up an array of sort criteria as in the docstring - criteria_list = [] - for i in range(len(sort_keys)): - crit_attrs = [] - if marker_values[i] is None: - continue - for j in range(i): - if sort_keys[j][1] is None: - model_attr = getattr(models.Artifact, sort_keys[j][0]) - else: - model_attr = getattr(models.ArtifactProperty, - sort_keys[j][1] + "_value") - default = None if isinstance( - model_attr.property.columns[0].type, - sqlalchemy.DateTime) else '' - attr = case([(model_attr != None, - model_attr), ], - else_=default) - crit_attrs.append((attr == marker_values[j])) - - if sort_keys[i][1] is None: - model_attr = getattr(models.Artifact, sort_keys[i][0]) - else: - model_attr = getattr(models.ArtifactProperty, - sort_keys[i][1] + "_value") - - default = None if isinstance(model_attr.property.columns[0].type, - sqlalchemy.DateTime) else '' - attr = case([(model_attr != None, - model_attr), ], - else_=default) - - if sort_dirs[i] == 'desc': - crit_attrs.append((attr < marker_values[i])) - else: - crit_attrs.append((attr > marker_values[i])) - - criteria = and_(*crit_attrs) - criteria_list.append(criteria) - - f = or_(*criteria_list) - query = query.filter(f) - - if limit is not None: - query = query.limit(limit) - - return query - - -def _do_artifacts_query(context, session, show_level=ga.Showlevel.NONE): - """Build the query to get all artifacts based on the context""" - - LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s", - {'is_admin': context.is_admin, 'owner': context.owner}) - - if show_level == ga.Showlevel.NONE: - query = session.query(models.Artifact).options( - joinedload(models.Artifact.tags)) - elif show_level == ga.Showlevel.BASIC: - query = ( - session.query(models.Artifact). - options(joinedload( - models.Artifact.properties). - defer(models.ArtifactProperty.text_value)). - options(joinedload(models.Artifact.tags)). - options(joinedload(models.Artifact.blobs). - joinedload(models.ArtifactBlob.locations))) - else: - # other show_levels aren't supported - msg = _LW("Show level %s is not supported in this " - "operation") % ga.Showlevel.to_str(show_level) - LOG.warn(msg) - raise exception.ArtifactUnsupportedShowLevel(shl=show_level) - - # If admin, return everything. - if context.is_admin: - return query - else: - # If regular user, return only public artifacts. - # However, if context.owner has a value, return both - # public and private artifacts of the context.owner. 
-        if context.owner is not None:
-            query = query.filter(
-                or_(models.Artifact.owner == context.owner,
-                    models.Artifact.visibility == 'public'))
-        else:
-            query = query.filter(
-                models.Artifact.visibility == 'public')
-        return query
-
-op_mappings = {
-    'EQ': operator.eq,
-    'GT': operator.gt,
-    'GE': operator.ge,
-    'LT': operator.lt,
-    'LE': operator.le,
-    'NE': operator.ne,
-    'IN': operator.eq  # it must be eq
-}
-
-
-def _do_query_filters(filters):
-    basic_conds = []
-    tag_conds = []
-    prop_conds = []
-
-    # don't show deleted artifacts
-    basic_conds.append([models.Artifact.state != 'deleted'])
-
-    visibility = filters.pop('visibility', None)
-    if visibility is not None:
-        # ignore operator. always consider it EQ
-        basic_conds.append(
-            [models.Artifact.visibility == visibility[0]['value']])
-
-    type_name = filters.pop('type_name', None)
-    if type_name is not None:
-        # ignore operator. always consider it EQ
-        basic_conds.append([models.Artifact.type_name == type_name['value']])
-        type_version = filters.pop('type_version', None)
-        if type_version is not None:
-            # ignore operator. always consider it EQ
-            # TODO(mfedosin) add support of LIKE operator
-            type_version = semver_db.parse(type_version['value'])
-            basic_conds.append([models.Artifact.type_version == type_version])
-
-    name = filters.pop('name', None)
-    if name is not None:
-        # ignore operator. always consider it EQ
-        basic_conds.append([models.Artifact.name == name[0]['value']])
-
-    versions = filters.pop('version', None)
-    if versions is not None:
-        for version in versions:
-            value = semver_db.parse(version['value'])
-            op = version['operator']
-            fn = op_mappings[op]
-            basic_conds.append([fn(models.Artifact.version, value)])
-
-    state = filters.pop('state', None)
-    if state is not None:
-        # ignore operator. always consider it EQ
-        basic_conds.append([models.Artifact.state == state['value']])
-
-    owner = filters.pop('owner', None)
-    if owner is not None:
-        # ignore operator. always consider it EQ
-        basic_conds.append([models.Artifact.owner == owner[0]['value']])
-
-    id_list = filters.pop('id_list', None)
-    if id_list is not None:
-        basic_conds.append([models.Artifact.id.in_(id_list['value'])])
-
-    name_list = filters.pop('name_list', None)
-    if name_list is not None:
-        basic_conds.append([models.Artifact.name.in_(name_list['value'])])
-
-    tags = filters.pop('tags', None)
-    if tags is not None:
-        for tag in tags:
-            tag_conds.append([models.ArtifactTag.value == tag['value']])
-
-    # process remaining filters
-    for filtername, filtervalues in filters.items():
-        for filtervalue in filtervalues:
-
-            db_prop_op = filtervalue['operator']
-            db_prop_value = filtervalue['value']
-            db_prop_type = filtervalue['type'] + "_value"
-            db_prop_position = filtervalue.get('position')
-
-            conds = [models.ArtifactProperty.name == filtername]
-
-            if db_prop_op in op_mappings:
-                fn = op_mappings[db_prop_op]
-                result = fn(getattr(models.ArtifactProperty, db_prop_type),
-                            db_prop_value)
-
-                cond = [result]
-                if db_prop_position != 'any':
-                    cond.append(
-                        models.ArtifactProperty.position == db_prop_position)
-                if db_prop_op == 'IN':
-                    if (db_prop_position is not None and
-                            db_prop_position != 'any'):
-                        msg = _LE("Cannot use this parameter with "
-                                  "the operator IN")
-                        LOG.error(msg)
-                        raise exception.ArtifactInvalidPropertyParameter(
-                            op='IN')
-                    cond = [result,
-                            models.ArtifactProperty.position >= 0]
-            else:
-                msg = _LE("Operator %s is not supported") % db_prop_op
-                LOG.error(msg)
-                raise exception.ArtifactUnsupportedPropertyOperator(
-                    op=db_prop_op)
-
-            conds.extend(cond)
-
-            prop_conds.append(conds)
-    return basic_conds, tag_conds, prop_conds
-
-
-def _do_tags(artifact, new_tags):
-    tags_to_update = []
-    # don't touch existing tags
-    for tag in artifact.tags:
-        if tag.value in new_tags:
-            tags_to_update.append(tag)
-            new_tags.remove(tag.value)
-    # add new tags
-    for tag in new_tags:
-        db_tag = models.ArtifactTag()
-        db_tag.value = tag
-        tags_to_update.append(db_tag)
-    return tags_to_update
-
-
-def _do_property(propname, prop, position=None):
-    db_prop = models.ArtifactProperty()
-    db_prop.name = propname
-    setattr(db_prop,
-            (prop['type'] + "_value"),
-            prop['value'])
-    db_prop.position = position
-    return db_prop
-
-
-def _do_properties(artifact, new_properties):
-
-    props_to_update = []
-    # don't touch existing properties
-    for prop in artifact.properties:
-        if prop.name not in new_properties:
-            props_to_update.append(prop)
-
-    for propname, prop in new_properties.items():
-        if prop['type'] == 'array':
-            for pos, arrprop in enumerate(prop['value']):
-                props_to_update.append(
-                    _do_property(propname, arrprop, pos)
-                )
-        else:
-            props_to_update.append(
-                _do_property(propname, prop)
-            )
-    return props_to_update
-
-
-def _do_blobs(artifact, new_blobs):
-    blobs_to_update = []
-
-    # don't touch existing blobs
-    for blob in artifact.blobs:
-        if blob.name not in new_blobs:
-            blobs_to_update.append(blob)
-
-    for blobname, blobs in new_blobs.items():
-        for pos, blob in enumerate(blobs):
-            for db_blob in artifact.blobs:
-                if db_blob.name == blobname and db_blob.position == pos:
-                    # update existing blobs
-                    db_blob.size = blob['size']
-                    db_blob.checksum = blob['checksum']
-                    db_blob.item_key = blob['item_key']
-                    db_blob.locations = _do_locations(db_blob,
-                                                      blob['locations'])
-                    blobs_to_update.append(db_blob)
-                    break
-            else:
-                # create new blob
-                db_blob = models.ArtifactBlob()
-                db_blob.name = blobname
-                db_blob.size = blob['size']
-                db_blob.checksum = blob['checksum']
-
db_blob.item_key = blob['item_key'] - db_blob.position = pos - db_blob.locations = _do_locations(db_blob, blob['locations']) - blobs_to_update.append(db_blob) - return blobs_to_update - - -def _do_locations(blob, new_locations): - locs_to_update = [] - for pos, loc in enumerate(new_locations): - for db_loc in blob.locations: - if db_loc.value == loc['value']: - # update existing location - db_loc.position = pos - db_loc.status = loc['status'] - locs_to_update.append(db_loc) - break - else: - # create new location - db_loc = models.ArtifactBlobLocation() - db_loc.value = loc['value'] - db_loc.status = loc['status'] - db_loc.position = pos - locs_to_update.append(db_loc) - return locs_to_update - - -def _do_dependencies(artifact, new_dependencies, session): - deps_to_update = [] - # small check that all dependencies are new - if artifact.dependencies is not None: - for db_dep in artifact.dependencies: - for dep in new_dependencies.keys(): - if db_dep.name == dep: - msg = _LW("Artifact with the specified type, name " - "and versions already has the direct " - "dependency=%s") % dep - LOG.warn(msg) - # change values of former dependency - for dep in artifact.dependencies: - session.delete(dep) - artifact.dependencies = [] - for depname, depvalues in new_dependencies.items(): - for pos, depvalue in enumerate(depvalues): - db_dep = models.ArtifactDependency() - db_dep.name = depname - db_dep.artifact_source = artifact.id - db_dep.artifact_dest = depvalue - db_dep.artifact_origin = artifact.id - db_dep.is_direct = True - db_dep.position = pos - deps_to_update.append(db_dep) - artifact.dependencies = deps_to_update - - -def _do_transitive_dependencies(artifact, session): - deps_to_update = [] - for dependency in artifact.dependencies: - depvalue = dependency.artifact_dest - transitdeps = session.query(models.ArtifactDependency).filter_by( - artifact_source=depvalue).all() - for transitdep in transitdeps: - if not transitdep.is_direct: - # transitive dependencies are already created - msg = _LW("Artifact with the specified type, " - "name and version already has the " - "direct dependency=%d") % transitdep.id - LOG.warn(msg) - raise exception.ArtifactDuplicateTransitiveDependency( - dep=transitdep.id) - - db_dep = models.ArtifactDependency() - db_dep.name = transitdep['name'] - db_dep.artifact_source = artifact.id - db_dep.artifact_dest = transitdep.artifact_dest - db_dep.artifact_origin = transitdep.artifact_source - db_dep.is_direct = False - db_dep.position = transitdep.position - deps_to_update.append(db_dep) - return deps_to_update - - -def _check_visibility(context, artifact): - if context.is_admin: - return True - - if not artifact.owner: - return True - - if artifact.visibility == Visibility.PUBLIC.value: - return True - - if artifact.visibility == Visibility.PRIVATE.value: - if context.owner and context.owner == artifact.owner: - return True - else: - return False - - if artifact.visibility == Visibility.SHARED.value: - return False - - return False - - -def _set_version_fields(values): - if 'type_version' in values: - values['type_version'] = semver_db.parse(values['type_version']) - if 'version' in values: - values['version'] = semver_db.parse(values['version']) - - -def _validate_values(values): - if 'state' in values: - try: - State(values['state']) - except ValueError: - msg = "Invalid artifact state '%s'" % values['state'] - raise exception.Invalid(msg) - if 'visibility' in values: - try: - Visibility(values['visibility']) - except ValueError: - msg = "Invalid artifact visibility 
'%s'" % values['visibility'] - raise exception.Invalid(msg) - # TODO(mfedosin): it's an idea to validate tags someday - # (check that all tags match the regexp) - - -def _drop_protected_attrs(model_class, values): - """ - Removed protected attributes from values dictionary using the models - __protected_attributes__ field. - """ - for attr in model_class.__protected_attributes__: - if attr in values: - del values[attr] diff --git a/glance/db/sqlalchemy/models_glare.py b/glance/db/sqlalchemy/models_glare.py deleted file mode 100644 index cc4b27b3cf..0000000000 --- a/glance/db/sqlalchemy/models_glare.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_db.sqlalchemy import models -from sqlalchemy import BigInteger -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy.ext import declarative -from sqlalchemy import ForeignKey -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy import Numeric -from sqlalchemy.orm import backref -from sqlalchemy.orm import composite -from sqlalchemy.orm import relationship -from sqlalchemy import String -from sqlalchemy import Text - -from glance.common import semver_db -from glance.common import timeutils -import glance.glare as ga - -BASE = declarative.declarative_base() - - -class ArtifactBase(models.ModelBase, models.TimestampMixin): - """Base class for Artifact Models.""" - - __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} - __table_initialized__ = False - __protected_attributes__ = set([ - "created_at", "updated_at"]) - - created_at = Column(DateTime, default=lambda: timeutils.utcnow(), - nullable=False) - - updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), - nullable=False, onupdate=lambda: timeutils.utcnow()) - - def save(self, session=None): - from glance.db.sqlalchemy import api as db_api - - super(ArtifactBase, self).save(session or db_api.get_session()) - - def keys(self): - return self.__dict__.keys() - - def values(self): - return self.__dict__.values() - - def items(self): - return self.__dict__.items() - - def to_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - -def _parse_property_type_value(prop, show_text_properties=True): - columns = [ - 'int_value', - 'string_value', - 'bool_value', - 'numeric_value'] - if show_text_properties: - columns.append('text_value') - - for prop_type in columns: - if getattr(prop, prop_type) is not None: - return prop_type.rpartition('_')[0], getattr(prop, prop_type) - - return None, None - - -class Artifact(BASE, ArtifactBase): - __tablename__ = 'artifacts' - __table_args__ = ( - Index('ix_artifact_name_and_version', 'name', 'version_prefix', - 'version_suffix'), - Index('ix_artifact_type', 'type_name', 'type_version_prefix', - 'type_version_suffix'), - Index('ix_artifact_state', 'state'), - Index('ix_artifact_owner', 'owner'), - 
Index('ix_artifact_visibility', 'visibility'), - {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) - - __protected_attributes__ = ArtifactBase.__protected_attributes__.union( - set(['published_at', 'deleted_at'])) - - id = Column(String(36), primary_key=True, - default=lambda: str(uuid.uuid4())) - name = Column(String(255), nullable=False) - type_name = Column(String(255), nullable=False) - type_version_prefix = Column(BigInteger().with_variant(Integer, "sqlite"), - nullable=False) - type_version_suffix = Column(String(255)) - type_version_meta = Column(String(255)) - type_version = composite(semver_db.DBVersion, type_version_prefix, - type_version_suffix, type_version_meta, - comparator_factory=semver_db.VersionComparator) - version_prefix = Column(BigInteger().with_variant(Integer, "sqlite"), - nullable=False) - version_suffix = Column(String(255)) - version_meta = Column(String(255)) - version = composite(semver_db.DBVersion, version_prefix, - version_suffix, version_meta, - comparator_factory=semver_db.VersionComparator) - description = Column(Text) - visibility = Column(String(32), nullable=False) - state = Column(String(32), nullable=False) - owner = Column(String(255), nullable=False) - published_at = Column(DateTime) - deleted_at = Column(DateTime) - - def to_dict(self, show_level=ga.Showlevel.BASIC, - show_text_properties=True): - d = super(Artifact, self).to_dict() - - d.pop('type_version_prefix') - d.pop('type_version_suffix') - d.pop('type_version_meta') - d.pop('version_prefix') - d.pop('version_suffix') - d.pop('version_meta') - d['type_version'] = str(self.type_version) - d['version'] = str(self.version) - - tags = [] - for tag in self.tags: - tags.append(tag.value) - d['tags'] = tags - - if show_level == ga.Showlevel.NONE: - return d - - properties = {} - - # sort properties - self.properties.sort(key=lambda elem: (elem.name, elem.position)) - - for prop in self.properties: - proptype, propvalue = _parse_property_type_value( - prop, show_text_properties) - if proptype is None: - continue - - if prop.position is not None: - # make array - for p in properties.keys(): - if p == prop.name: - # add value to array - properties[p]['value'].append(dict(type=proptype, - value=propvalue)) - break - else: - # create new array - p = dict(type='array', - value=[]) - p['value'].append(dict(type=proptype, - value=propvalue)) - properties[prop.name] = p - else: - # make scalar - properties[prop.name] = dict(type=proptype, - value=propvalue) - d['properties'] = properties - - blobs = {} - # sort blobs - self.blobs.sort(key=lambda elem: elem.position) - - for blob in self.blobs: - locations = [] - # sort locations - blob.locations.sort(key=lambda elem: elem.position) - for loc in blob.locations: - locations.append(dict(value=loc.value, - status=loc.status)) - if blob.name in blobs: - blobs[blob.name].append(dict(size=blob.size, - checksum=blob.checksum, - locations=locations, - item_key=blob.item_key)) - else: - blobs[blob.name] = [] - blobs[blob.name].append(dict(size=blob.size, - checksum=blob.checksum, - locations=locations, - item_key=blob.item_key)) - - d['blobs'] = blobs - - return d - - -class ArtifactDependency(BASE, ArtifactBase): - __tablename__ = 'artifact_dependencies' - __table_args__ = (Index('ix_artifact_dependencies_source_id', - 'artifact_source'), - Index('ix_artifact_dependencies_origin_id', - 'artifact_origin'), - Index('ix_artifact_dependencies_dest_id', - 'artifact_dest'), - Index('ix_artifact_dependencies_direct_dependencies', - 'artifact_source', 'is_direct'), - 
{'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) - - id = Column(String(36), primary_key=True, nullable=False, - default=lambda: str(uuid.uuid4())) - artifact_source = Column(String(36), ForeignKey('artifacts.id'), - nullable=False) - artifact_dest = Column(String(36), ForeignKey('artifacts.id'), - nullable=False) - artifact_origin = Column(String(36), ForeignKey('artifacts.id'), - nullable=False) - is_direct = Column(Boolean, nullable=False) - position = Column(Integer) - name = Column(String(36)) - - source = relationship('Artifact', - backref=backref('dependencies', cascade="all, " - "delete"), - foreign_keys="ArtifactDependency.artifact_source") - dest = relationship('Artifact', - foreign_keys="ArtifactDependency.artifact_dest") - origin = relationship('Artifact', - foreign_keys="ArtifactDependency.artifact_origin") - - -class ArtifactTag(BASE, ArtifactBase): - __tablename__ = 'artifact_tags' - __table_args__ = (Index('ix_artifact_tags_artifact_id', 'artifact_id'), - Index('ix_artifact_tags_artifact_id_tag_value', - 'artifact_id', 'value'), - {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) - - id = Column(String(36), primary_key=True, nullable=False, - default=lambda: str(uuid.uuid4())) - artifact_id = Column(String(36), ForeignKey('artifacts.id'), - nullable=False) - artifact = relationship(Artifact, - backref=backref('tags', - cascade="all, delete-orphan")) - value = Column(String(255), nullable=False) - - -class ArtifactProperty(BASE, ArtifactBase): - __tablename__ = 'artifact_properties' - __table_args__ = ( - Index('ix_artifact_properties_artifact_id', 'artifact_id'), - Index('ix_artifact_properties_name', 'name'), - {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) - id = Column(String(36), primary_key=True, nullable=False, - default=lambda: str(uuid.uuid4())) - artifact_id = Column(String(36), ForeignKey('artifacts.id'), - nullable=False) - artifact = relationship(Artifact, - backref=backref('properties', - cascade="all, delete-orphan")) - name = Column(String(255), nullable=False) - string_value = Column(String(255)) - int_value = Column(Integer) - numeric_value = Column(Numeric) - bool_value = Column(Boolean) - text_value = Column(Text) - position = Column(Integer) - - -class ArtifactBlob(BASE, ArtifactBase): - __tablename__ = 'artifact_blobs' - __table_args__ = ( - Index('ix_artifact_blobs_artifact_id', 'artifact_id'), - Index('ix_artifact_blobs_name', 'name'), - {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) - id = Column(String(36), primary_key=True, nullable=False, - default=lambda: str(uuid.uuid4())) - artifact_id = Column(String(36), ForeignKey('artifacts.id'), - nullable=False) - name = Column(String(255), nullable=False) - item_key = Column(String(329)) - size = Column(BigInteger().with_variant(Integer, "sqlite"), - nullable=False) - checksum = Column(String(32)) - position = Column(Integer) - artifact = relationship(Artifact, - backref=backref('blobs', - cascade="all, delete-orphan")) - - -class ArtifactBlobLocation(BASE, ArtifactBase): - __tablename__ = 'artifact_blob_locations' - __table_args__ = (Index('ix_artifact_blob_locations_blob_id', - 'blob_id'), - {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) - - id = Column(String(36), primary_key=True, nullable=False, - default=lambda: str(uuid.uuid4())) - blob_id = Column(String(36), ForeignKey('artifact_blobs.id'), - nullable=False) - value = Column(Text, nullable=False) - position = Column(Integer) - status = Column(String(36), default='active', nullable=True) - blob = 
relationship(ArtifactBlob, - backref=backref('locations', - cascade="all, delete-orphan")) - - -def register_models(engine): - """Create database tables for all models with the given engine.""" - models = (Artifact, ArtifactTag, ArtifactProperty, - ArtifactBlob, ArtifactBlobLocation, ArtifactDependency) - for model in models: - model.metadata.create_all(engine) - - -def unregister_models(engine): - """Drop database tables for all models with the given engine.""" - models = (ArtifactDependency, ArtifactBlobLocation, ArtifactBlob, - ArtifactProperty, ArtifactTag, Artifact) - for model in models: - model.metadata.drop_all(engine) diff --git a/glance/glare/__init__.py b/glance/glare/__init__.py deleted file mode 100644 index 75b43ece85..0000000000 --- a/glance/glare/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from glance.common import exception - - -class Showlevel(object): - # None - do not show additional properties and blobs with locations; - # Basic - show all artifact fields except dependencies; - # Direct - show all artifact fields with only direct dependencies; - # Transitive - show all artifact fields with all of dependencies. - NONE = 0 - BASIC = 1 - DIRECT = 2 - TRANSITIVE = 3 - - _level_map = {'none': NONE, 'basic': BASIC, 'direct': DIRECT, - 'transitive': TRANSITIVE} - _inverted_level_map = {v: k for k, v in six.iteritems(_level_map)} - - @staticmethod - def to_str(n): - try: - return Showlevel._inverted_level_map[n] - except KeyError: - raise exception.ArtifactUnsupportedShowLevel() - - @staticmethod - def from_str(str_value): - try: - return Showlevel._level_map[str_value] - except KeyError: - raise exception.ArtifactUnsupportedShowLevel() diff --git a/glance/glare/dependency.py b/glance/glare/dependency.py deleted file mode 100644 index bf76e5211b..0000000000 --- a/glance/glare/dependency.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
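Since every model above hangs off the single declarative BASE, the shared MetaData already knows the foreign-key ordering that register_models() and unregister_models() rely on: create_all() emits parents before children and drop_all() the reverse. A stand-alone sketch under that assumption; the Parent/Child models are hypothetical, not Glare's:

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext import declarative

BASE = declarative.declarative_base()


class Parent(BASE):
    __tablename__ = 'parents'
    id = Column(String(36), primary_key=True)


class Child(BASE):
    __tablename__ = 'children'
    id = Column(Integer, primary_key=True)
    parent_id = Column(String(36), ForeignKey('parents.id'), nullable=False)


engine = create_engine('sqlite://')
BASE.metadata.create_all(engine)  # CREATE parents, then children
BASE.metadata.drop_all(engine)    # DROP children, then parents
```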
- -import glance.common.exception as exc -import glance.common.glare.definitions as definitions -from glance.glare.domain import proxy -from glance.i18n import _ - - -class ArtifactProxy(proxy.Artifact): - def __init__(self, artifact, repo): - super(ArtifactProxy, self).__init__(artifact) - self.artifact = artifact - self.repo = repo - - def set_type_specific_property(self, prop_name, value): - if prop_name not in self.metadata.attributes.dependencies: - return super(ArtifactProxy, self).set_type_specific_property( - prop_name, value) - # for every dependency have to transfer dep_id into a dependency itself - if value is None: - setattr(self.artifact, prop_name, None) - else: - if not isinstance(value, list): - setattr(self.artifact, prop_name, - self._fetch_dependency(value)) - else: - setattr(self.artifact, prop_name, - [self._fetch_dependency(dep_id) for dep_id in value]) - - def _fetch_dependency(self, dep_id): - # check for circular dependency id -> id - if self.id == dep_id: - raise exc.ArtifactCircularDependency() - art = self.repo.get(artifact_id=dep_id) - - # repo returns a proxy of some level. - # Need to find the base declarative artifact - while not isinstance(art, definitions.ArtifactType): - art = art.base - return art - - -class ArtifactRepo(proxy.ArtifactRepo): - def __init__(self, repo, plugins, - item_proxy_class=None, item_proxy_kwargs=None): - self.plugins = plugins - super(ArtifactRepo, self).__init__(repo, - item_proxy_class=ArtifactProxy, - item_proxy_kwargs={'repo': self}) - - def _check_dep_state(self, dep, state): - """Raises an exception if dependency 'dep' is not in state 'state'""" - if dep.state != state: - raise exc.Invalid(_( - "Not all dependencies are in '%s' state") % state) - - def publish(self, artifact, *args, **kwargs): - """ - Creates transitive dependencies, - checks that all dependencies are in active state and - transfers artifact from creating to active state - """ - # make sure that all required dependencies exist - artifact.__pre_publish__(*args, **kwargs) - # make sure that all dependencies are active - for param in artifact.metadata.attributes.dependencies: - dependency = getattr(artifact, param) - if isinstance(dependency, list): - for dep in dependency: - self._check_dep_state(dep, 'active') - elif dependency: - self._check_dep_state(dependency, 'active') - # as state is changed on db save, have to retrieve the freshly changed - # artifact (the one passed into the func will have old state value) - artifact = self.base.publish(self.helper.unproxy(artifact)) - - return self.helper.proxy(artifact) - - def remove(self, artifact): - """ - Checks that artifact has no dependencies and removes it. 
- Otherwise an exception is raised - """ - for param in artifact.metadata.attributes.dependencies: - if getattr(artifact, param): - raise exc.Invalid(_( - "Dependency property '%s' has to be deleted first") % - param) - return self.base.remove(self.helper.unproxy(artifact)) - - -class ArtifactFactory(proxy.ArtifactFactory): - def __init__(self, base, klass, repo): - self.klass = klass - self.repo = repo - super(ArtifactFactory, self).__init__( - base, artifact_proxy_class=ArtifactProxy, - artifact_proxy_kwargs={'repo': self.repo}) - - def new_artifact(self, *args, **kwargs): - """ - Creates an artifact without dependencies first - and then adds them to the newly created artifact - """ - # filter dependencies - no_deps = {p: kwargs[p] for p in kwargs - if p not in self.klass.metadata.attributes.dependencies} - deps = {p: kwargs[p] for p in kwargs - if p in self.klass.metadata.attributes.dependencies} - artifact = super(ArtifactFactory, self).new_artifact(*args, **no_deps) - # now set dependencies - for dep_param, dep_value in deps.items(): - setattr(artifact, dep_param, dep_value) - return artifact diff --git a/glance/glare/domain/__init__.py b/glance/glare/domain/__init__.py deleted file mode 100644 index a25d56028c..0000000000 --- a/glance/glare/domain/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
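ArtifactFactory.new_artifact() above splits its keyword arguments so the artifact is first built without dependency properties and each dependency is then assigned through set_type_specific_property(). A minimal sketch of that split, with illustrative names only:

```python
def split_dependencies(kwargs, dependency_names):
    """Separate plain constructor kwargs from dependency properties."""
    plain = {k: v for k, v in kwargs.items() if k not in dependency_names}
    deps = {k: v for k, v in kwargs.items() if k in dependency_names}
    return plain, deps


plain, deps = split_dependencies(
    {'name': 'demo', 'depends_on': ['uuid-1']}, {'depends_on'})
assert plain == {'name': 'demo'}
assert deps == {'depends_on': ['uuid-1']}
```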
- -import uuid - -from glance.common import timeutils -from glance.i18n import _ - - -class Artifact(object): - - def __init__(self, id, name, version, type_name, type_version, - state, owner, visibility='private', created_at=None, - updated_at=None, **kwargs): - self.id = id - self.name = name - self.type_name = type_name - self.version = version - self.type_version = type_version - self.visibility = visibility - self.state = state - self.owner = owner - self.created_at = created_at - self.updated_at = updated_at - self.description = kwargs.pop('description', None) - self.blobs = kwargs.pop('blobs', {}) - self.properties = kwargs.pop('properties', {}) - self.dependencies = kwargs.pop('dependencies', {}) - self.tags = kwargs.pop('tags', []) - - if kwargs: - message = _("__init__() got unexpected keyword argument '%s'") - raise TypeError(message % list(kwargs.keys())[0]) - - -class ArtifactFactory(object): - def __init__(self, context, klass): - self.klass = klass - self.context = context - - def new_artifact(self, name, version, **kwargs): - id = kwargs.pop('id', str(uuid.uuid4())) - tags = kwargs.pop('tags', []) - # pop reserved fields from kwargs dict - for param in ['owner', 'created_at', 'updated_at', - 'deleted_at', 'state']: - kwargs.pop(param, '') - curr_timestamp = timeutils.utcnow() - base = self.klass(id=id, - name=name, - version=version, - state='creating', - owner=self.context.owner or '', - created_at=curr_timestamp, - updated_at=curr_timestamp, - tags=tags, - **kwargs) - return base diff --git a/glance/glare/domain/proxy.py b/glance/glare/domain/proxy.py deleted file mode 100644 index 35591be5d3..0000000000 --- a/glance/glare/domain/proxy.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
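The domain Artifact above consumes its optional fields with kwargs.pop() and then rejects whatever is left over, mimicking Python's own message for an unexpected keyword argument. A reduced sketch of the same guard; Record and its fields are hypothetical:

```python
class Record(object):
    """Reduced stand-in for the strict-kwargs pattern used by Artifact."""

    def __init__(self, id, name, **kwargs):
        self.id = id
        self.name = name
        self.description = kwargs.pop('description', None)
        if kwargs:  # anything not popped above is an unknown field
            message = "__init__() got unexpected keyword argument '%s'"
            raise TypeError(message % list(kwargs.keys())[0])


Record(id='1', name='ok', description='fine')
try:
    Record(id='2', name='bad', colour='red')
except TypeError as e:
    print(e)  # __init__() got unexpected keyword argument 'colour'
```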
- -import collections - -from glance.common import exception as exc -from glance.domain import proxy as image_proxy - - -def _proxy_artifact_property(attr): - def getter(self): - return self.get_type_specific_property(attr) - - def setter(self, value): - return self.set_type_specific_property(attr, value) - - return property(getter, setter) - - -class ArtifactHelper(image_proxy.Helper): - """ - Artifact-friendly proxy helper: does all the same as regular helper - but also dynamically proxies all the type-specific attributes, - including properties, blobs and dependencies - """ - def proxy(self, obj): - if obj is None or self.proxy_class is None: - return obj - if not hasattr(obj, 'metadata'): - return super(ArtifactHelper, self).proxy(obj) - extra_attrs = {} - for att_name in obj.metadata.attributes.all.keys(): - extra_attrs[att_name] = _proxy_artifact_property(att_name) - new_proxy_class = type("%s(%s)" % (obj.metadata.type_name, - self.proxy_class.__module__), - (self.proxy_class,), - extra_attrs) - return new_proxy_class(obj, **self.proxy_kwargs) - - -class ArtifactRepo(object): - def __init__(self, base, proxy_helper=None, item_proxy_class=None, - item_proxy_kwargs=None): - self.base = base - if proxy_helper is None: - proxy_helper = ArtifactHelper(item_proxy_class, item_proxy_kwargs) - self.helper = proxy_helper - - def get(self, *args, **kwargs): - return self.helper.proxy(self.base.get(*args, **kwargs)) - - def list(self, *args, **kwargs): - items = self.base.list(*args, **kwargs) - return [self.helper.proxy(item) for item in items] - - def add(self, item): - base_item = self.helper.unproxy(item) - result = self.base.add(base_item) - return self.helper.proxy(result) - - def save(self, item): - base_item = self.helper.unproxy(item) - result = self.base.save(base_item) - return self.helper.proxy(result) - - def remove(self, item): - base_item = self.helper.unproxy(item) - result = self.base.remove(base_item) - return self.helper.proxy(result) - - def publish(self, item, *args, **kwargs): - base_item = self.helper.unproxy(item) - result = self.base.publish(base_item, *args, **kwargs) - return self.helper.proxy(result) - - -class Artifact(object): - def __init__(self, base, proxy_class=None, proxy_kwargs=None): - self.base = base - self.helper = ArtifactHelper(proxy_class, proxy_kwargs) - - # it is enough to proxy metadata only, other properties will be proxied - # automatically by ArtifactHelper - metadata = _proxy_artifact_property('metadata') - - def set_type_specific_property(self, prop_name, value): - setattr(self.base, prop_name, value) - - def get_type_specific_property(self, prop_name): - try: - return getattr(self.base, prop_name) - except AttributeError: - raise exc.ArtifactInvalidProperty(prop=prop_name) - - def __pre_publish__(self, *args, **kwargs): - self.base.__pre_publish__(*args, **kwargs) - - -class ArtifactFactory(object): - def __init__(self, base, - artifact_proxy_class=Artifact, - artifact_proxy_kwargs=None): - self.artifact_helper = ArtifactHelper(artifact_proxy_class, - artifact_proxy_kwargs) - self.base = base - - def new_artifact(self, *args, **kwargs): - t = self.base.new_artifact(*args, **kwargs) - return self.artifact_helper.proxy(t) - - -class ArtifactBlob(object): - def __init__(self, base, artifact_blob_proxy_class=None, - artifact_blob_proxy_kwargs=None): - self.base = base - self.helper = image_proxy.Helper(artifact_blob_proxy_class, - artifact_blob_proxy_kwargs) - - size = _proxy_artifact_property('size') - locations = 
_proxy_artifact_property('locations') - checksum = _proxy_artifact_property('checksum') - item_key = _proxy_artifact_property('item_key') - - def set_type_specific_property(self, prop_name, value): - setattr(self.base, prop_name, value) - - def get_type_specific_property(self, prop_name): - return getattr(self.base, prop_name) - - def to_dict(self): - return self.base.to_dict() - - -class ArtifactProperty(object): - def __init__(self, base, proxy_class=None, proxy_kwargs=None): - self.base = base - self.helper = ArtifactHelper(proxy_class, proxy_kwargs) - - def set_type_specific_property(self, prop_name, value): - setattr(self.base, prop_name, value) - - def get_type_specific_property(self, prop_name): - return getattr(self.base, prop_name) - - -class List(collections.MutableSequence): - def __init__(self, base, item_proxy_class=None, - item_proxy_kwargs=None): - self.base = base - self.helper = image_proxy.Helper(item_proxy_class, item_proxy_kwargs) - - def __len__(self): - return len(self.base) - - def __delitem__(self, index): - del self.base[index] - - def __getitem__(self, index): - item = self.base[index] - return self.helper.proxy(item) - - def insert(self, index, value): - self.base.insert(index, self.helper.unproxy(value)) - - def __setitem__(self, index, value): - self.base[index] = self.helper.unproxy(value) - - -class Dict(collections.MutableMapping): - def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): - self.base = base - self.helper = image_proxy.Helper(item_proxy_class, item_proxy_kwargs) - - def __setitem__(self, key, value): - self.base[key] = self.helper.unproxy(value) - - def __getitem__(self, key): - item = self.base[key] - return self.helper.proxy(item) - - def __delitem__(self, key): - del self.base[key] - - def __len__(self): - return len(self.base) - - def __iter__(self): - for key in self.base.keys(): - yield key diff --git a/glance/glare/gateway.py b/glance/glare/gateway.py deleted file mode 100644 index 398b1e1279..0000000000 --- a/glance/glare/gateway.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
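ArtifactHelper.proxy() above manufactures a proxy subclass at runtime: one delegating property per type-specific attribute, attached to a class built with type(). The sketch below shows the bare mechanism; every name in it is illustrative rather than part of the removed module:

```python
def _delegating_property(attr):
    """Build a property that forwards reads and writes to self.base."""
    def getter(self):
        return getattr(self.base, attr)

    def setter(self, value):
        setattr(self.base, attr, value)

    return property(getter, setter)


class BaseProxy(object):
    def __init__(self, base):
        self.base = base


def make_proxy_class(attr_names):
    # One delegating property per attribute, on a dynamically built subclass.
    extra = {name: _delegating_property(name) for name in attr_names}
    return type('DynamicProxy', (BaseProxy,), extra)


class Payload(object):
    colour = 'red'


proxy = make_proxy_class(['colour'])(Payload())
proxy.colour = 'blue'               # forwarded to the wrapped object
assert proxy.base.colour == 'blue'
```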
-import glance_store - -from glance.common import store_utils -import glance.db -from glance.glare import dependency -from glance.glare import domain -from glance.glare import location -from glance.glare import updater - - -class Gateway(object): - def __init__(self, db_api=None, store_api=None, plugins=None): - self.db_api = db_api or glance.db.get_api() - self.store_api = store_api or glance_store - self.store_utils = store_utils - self.plugins = plugins - - def get_artifact_type_factory(self, context, klass): - declarative_factory = domain.ArtifactFactory(context, klass) - repo = self.get_artifact_repo(context) - dependencies_factory = dependency.ArtifactFactory(declarative_factory, - klass, repo) - factory = location.ArtifactFactoryProxy(dependencies_factory, - context, - self.store_api, - self.store_utils) - updater_factory = updater.ArtifactFactoryProxy(factory) - return updater_factory - - def get_artifact_repo(self, context): - artifact_repo = glance.db.ArtifactRepo(context, - self.db_api, - self.plugins) - dependencies_repo = dependency.ArtifactRepo(artifact_repo, - self.plugins) - repo = location.ArtifactRepoProxy(dependencies_repo, - context, - self.store_api, - self.store_utils) - updater_repo = updater.ArtifactRepoProxy(repo) - return updater_repo diff --git a/glance/glare/location.py b/glance/glare/location.py deleted file mode 100644 index 8ece66dffa..0000000000 --- a/glance/glare/location.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
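Gateway.get_artifact_repo() above wraps one base repository in successive proxy layers (db, then dependency, location, updater), so every call threads through each wrapper in turn. A toy version of that onion with stand-in layer names:

```python
class TracingLayer(object):
    """Stand-in for one proxy layer; tags each result as it passes through."""

    def __init__(self, base, tag):
        self.base = base
        self.tag = tag

    def get(self, artifact_id):
        return '%s(%s)' % (self.tag, self.base.get(artifact_id))


class DbRepo(object):
    def get(self, artifact_id):
        return 'row:%s' % artifact_id


repo = DbRepo()
for tag in ('dependency', 'location', 'updater'):
    repo = TracingLayer(repo, tag)

assert repo.get('42') == 'updater(location(dependency(row:42)))'
```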
- -import sys -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils - -from glance.common.glare import definitions -from glance.common import utils -from glance.glare.domain import proxy -from glance.i18n import _LE, _LW - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class ArtifactFactoryProxy(proxy.ArtifactFactory): - def __init__(self, factory, context, store_api, store_utils): - self.context = context - self.store_api = store_api - self.store_utils = store_utils - proxy_kwargs = {'store_api': store_api, - 'store_utils': store_utils, - 'context': self.context} - super(ArtifactFactoryProxy, self).__init__( - factory, - artifact_proxy_class=ArtifactProxy, - artifact_proxy_kwargs=proxy_kwargs) - - -class ArtifactProxy(proxy.Artifact): - def __init__(self, artifact, context, store_api, store_utils): - self.artifact = artifact - self.context = context - self.store_api = store_api - self.store_utils = store_utils - super(ArtifactProxy, - self).__init__(artifact, - proxy_class=ArtifactBlobProxy, - proxy_kwargs={"context": self.context, - "store_api": self.store_api}) - - def set_type_specific_property(self, prop_name, value): - if prop_name not in self.artifact.metadata.attributes.blobs: - super(ArtifactProxy, self).set_type_specific_property(prop_name, - value) - return - item_key = "%s.%s" % (self.artifact.id, prop_name) - # XXX FIXME have to add support for BinaryObjectList properties - blob = definitions.Blob(item_key=item_key) - blob_proxy = self.helper.proxy(blob) - - if value is None: - for location in blob_proxy.locations: - blob_proxy.delete_from_store(location) - else: - data = value[0] - size = value[1] - blob_proxy.upload_to_store(data, size) - setattr(self.artifact, prop_name, blob) - - def get_type_specific_property(self, prop_name): - base = super(ArtifactProxy, self).get_type_specific_property(prop_name) - if base is None: - return None - if prop_name in self.artifact.metadata.attributes.blobs: - if isinstance(self.artifact.metadata.attributes.blobs[prop_name], - list): - return ArtifactBlobProxyList(self.artifact.id, - prop_name, - base, - self.context, - self.store_api) - else: - return self.helper.proxy(base) - else: - return base - - -class ArtifactRepoProxy(proxy.ArtifactRepo): - def __init__(self, artifact_repo, context, store_api, store_utils): - self.context = context - self.store_api = store_api - proxy_kwargs = {'context': context, 'store_api': store_api, - 'store_utils': store_utils} - super(ArtifactRepoProxy, self).__init__( - artifact_repo, - proxy_helper=proxy.ArtifactHelper(ArtifactProxy, proxy_kwargs)) - - def get(self, *args, **kwargs): - return self.helper.proxy(self.base.get(*args, **kwargs)) - - -class ArtifactBlobProxy(proxy.ArtifactBlob): - def __init__(self, blob, context, store_api): - self.context = context - self.store_api = store_api - self.blob = blob - super(ArtifactBlobProxy, self).__init__(blob) - - def delete_from_store(self, location): - try: - ret = self.store_api.delete_from_backend(location['value'], - context=self.context) - location['status'] = 'deleted' - return ret - except self.store_api.NotFound: - msg = _LW('Failed to delete blob' - ' %s in store from URI') % self.blob.id - LOG.warn(msg) - except self.store_api.StoreDeleteNotSupported as e: - LOG.warn(encodeutils.exception_to_unicode(e)) - except self.store_api.UnsupportedBackend: - exc_type = sys.exc_info()[0].__name__ - msg = (_LE('Failed to delete blob' - ' %(blob_id)s from store: %(exc)s') % - 
dict(blob_id=self.blob.id, exc=exc_type)) - LOG.error(msg) - - def upload_to_store(self, data, size): - if size is None: # NOTE(ativelkov): None is "unknown size" - size = 0 - location, ret_size, checksum, loc_meta = self.store_api.add_to_backend( - CONF, - self.blob.item_key, - utils.LimitingReader(utils.CooperativeReader(data), - CONF.image_size_cap), - size, - context=self.context) - self.blob.size = ret_size - self.blob.locations = [{'status': 'active', 'value': location}] - self.blob.checksum = checksum - - @property - def data_stream(self): - if len(self.locations) > 0: - err = None - try: - for location in self.locations: - data, size = self.store_api.get_from_backend( - location['value'], - context=self.context) - return data - except Exception as e: - LOG.warn(_LW('Get blob %(name)s data failed: ' - '%(err)s.') - % {'name': self.blob.item_key, - 'err': encodeutils.exception_to_unicode(e)}) - err = e - - # tried all locations - LOG.error(_LE('Glance tried all active locations to get data ' - 'for blob %s ' - 'but all have failed.') % self.blob.item_key) - raise err - - -class ArtifactBlobProxyList(proxy.List): - def __init__(self, artifact_id, prop_name, bloblist, context, store_api): - self.artifact_id = artifact_id - self.prop_name = prop_name - self.context = context - self.store_api = store_api - super(ArtifactBlobProxyList, - self).__init__(bloblist, - item_proxy_class=ArtifactBlobProxy, - item_proxy_kwargs={'context': context, - 'store_api': store_api}) - - def insert(self, index, value): - data = value[0] - size = value[1] - item_key = "%s.%s.%s" % (self.artifact_id, self.prop_name, - uuid.uuid4()) - blob = definitions.Blob(item_key=item_key) - blob_proxy = self.helper.proxy(blob) - blob_proxy.upload_to_store(data, size) - super(ArtifactBlobProxyList, self).insert(index, blob_proxy) - - def __setitem__(self, index, value): - blob = self[index] - data = value[0] - size = value[1] - blob.upload_to_store(data, size) diff --git a/glance/glare/updater.py b/glance/glare/updater.py deleted file mode 100644 index 1287077cc6..0000000000 --- a/glance/glare/updater.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.common import exception as exc -from glance.glare.domain import proxy -from glance.i18n import _ - - -class ArtifactProxy(proxy.Artifact): - """A proxy that is capable of modifying an artifact via jsonpatch methods. - - Currently supported methods are update, remove, replace. 
- """ - def __init__(self, artifact): - self.artifact = artifact - super(ArtifactProxy, self).__init__(artifact) - - def __getattr__(self, name): - if not hasattr(self, name): - raise exc.ArtifactInvalidProperty(prop=name) - return super(ArtifactProxy, self).__getattr__(name) - - def _perform_op(self, op, **kwargs): - path = kwargs.get("path") - value = kwargs.get("value") - prop_name, delimiter, path_left = path.lstrip('/').partition('/') - super(ArtifactProxy, self).get_type_specific_property(prop_name) - if not path_left: - return setattr(self, prop_name, value) - try: - prop = self._get_prop_to_update(prop_name, path_left) - # correct path_left and call corresponding update method - kwargs["path"] = path_left - getattr(prop, op)(path=kwargs["path"], value=kwargs.get("value")) - return setattr(self, prop_name, prop) - except exc.InvalidJsonPatchPath: - # NOTE(ivasilevskaya): here exception is reraised with - # 'part of path' substituted with with 'full path' to form a - # more relevant message - raise exc.InvalidJsonPatchPath( - path=path, explanation=_("No property to access")) - - def _get_prop_to_update(self, prop_name, path): - """Proxies properties that can be modified via update request. - - All properties can be updated save for 'metadata' and blobs. - Due to the fact that empty lists and dicts are represented with null - values, have to check precise type definition by consulting metadata. - """ - prop = super(ArtifactProxy, self).get_type_specific_property( - prop_name) - if (prop_name == "metadata" or - prop_name in self.artifact.metadata.attributes.blobs): - return prop - if not prop: - # get correct type for empty list/dict - klass = self.artifact.metadata.attributes.all[prop_name] - if isinstance(klass, list): - prop = [] - elif isinstance(klass, dict): - prop = {} - return wrap_property(prop, path) - - def replace(self, path, value): - self._perform_op("replace", path=path, value=value) - - def remove(self, path, value=None): - self._perform_op("remove", path=path) - - def add(self, path, value): - self._perform_op("add", path=path, value=value) - - -class ArtifactFactoryProxy(proxy.ArtifactFactory): - def __init__(self, factory): - super(ArtifactFactoryProxy, self).__init__(factory) - - -class ArtifactRepoProxy(proxy.ArtifactRepo): - def __init__(self, repo): - super(ArtifactRepoProxy, self).__init__( - repo, item_proxy_class=ArtifactProxy) - - -def wrap_property(prop_value, full_path): - if isinstance(prop_value, list): - return ArtifactListPropertyProxy(prop_value, full_path) - if isinstance(prop_value, dict): - return ArtifactDictPropertyProxy(prop_value, full_path) - # no other types are supported - raise exc.InvalidJsonPatchPath(path=full_path) - - -class ArtifactListPropertyProxy(proxy.List): - """A class to wrap a list property. - - Makes possible to modify the property value via supported jsonpatch - requests (update/remove/replace). - """ - def __init__(self, prop_value, path): - super(ArtifactListPropertyProxy, self).__init__( - prop_value) - - def _proc_key(self, idx_str, should_exist=True): - """JsonPatchUpdateMixin method overload. - - Only integers less than current array length and '-' (last elem) - in path are allowed. - Raises an InvalidJsonPatchPath exception if any of the conditions above - are not met. 
- """ - if idx_str == '-': - return len(self) - 1 - try: - idx = int(idx_str) - if not should_exist and len(self) == 0: - return 0 - if len(self) < idx + 1: - msg = _("Array has no element at position %d") % idx - raise exc.InvalidJsonPatchPath(explanation=msg, path=idx) - return idx - except (ValueError, TypeError): - msg = _("Not an array idx '%s'") % idx_str - raise exc.InvalidJsonPatchPath(explanation=msg, path=idx_str) - - def add(self, path, value): - # by now arrays can't contain complex structures (due to Declarative - # Framework limitations and DB storage model), - # so will 'path' == idx equality is implied. - idx = self._proc_key(path, False) - if idx == len(self) - 1: - self.append(value) - else: - self.insert(idx, value) - return self.base - - def remove(self, path, value=None): - # by now arrays can't contain complex structures, so will imply that - # 'path' == idx [see comment for add()] - del self[self._proc_key(path)] - return self.base - - def replace(self, path, value): - # by now arrays can't contain complex structures, so will imply that - # 'path' == idx [see comment for add()] - self[self._proc_key(path)] = value - return self.base - - -class ArtifactDictPropertyProxy(proxy.Dict): - """A class to wrap a dict property. - - Makes possible to modify the property value via supported jsonpatch - requests (update/remove/replace). - """ - def __init__(self, prop_value, path): - super(ArtifactDictPropertyProxy, self).__init__( - prop_value) - - def _proc_key(self, key_str, should_exist=True): - """JsonPatchUpdateMixin method overload""" - if should_exist and key_str not in self.keys(): - msg = _("No such key '%s' in a dict") % key_str - raise exc.InvalidJsonPatchPath(path=key_str, explanation=msg) - return key_str - - def replace(self, path, value): - start, delimiter, rest = path.partition('/') - # the full path MUST exist in replace operation, so let's check - # that such key exists - key = self._proc_key(start) - if not rest: - self[key] = value - else: - prop = wrap_property(self[key], rest) - self[key] = prop.replace(rest, value) - - def remove(self, path, value=None): - start, delimiter, rest = path.partition('/') - key = self._proc_key(start) - if not rest: - del self[key] - else: - prop = wrap_property(self[key], rest) - prop.remove(rest) - - def add(self, path, value): - start, delimiter, rest = path.partition('/') - if not rest: - self[start] = value - else: - key = self._proc_key(start) - prop = wrap_property(self[key], rest) - self[key] = prop.add(rest, value) diff --git a/glance/opts.py b/glance/opts.py index e1a52c96d4..033b26868d 100644 --- a/glance/opts.py +++ b/glance/opts.py @@ -18,7 +18,6 @@ __all__ = [ 'list_scrubber_opts', 'list_cache_opts', 'list_manage_opts', - 'list_artifacts_opts' ] import copy @@ -108,17 +107,6 @@ _cache_opts = [ _manage_opts = [ (None, []) ] -_artifacts_opts = [ - (None, list(itertools.chain( - glance.api.middleware.context.context_opts, - glance.api.versions.versions_opts, - glance.common.wsgi.bind_opts, - glance.common.wsgi.eventlet_opts, - glance.common.wsgi.socket_opts, - glance.notifier.notifier_opts))), - profiler.list_opts()[0], - ('paste_deploy', glance.common.config.paste_deploy_opts) -] def list_api_opts(): @@ -165,8 +153,3 @@ def list_cache_opts(): def list_manage_opts(): """Return a list of oslo_config options available in Glance manage.""" return [(g, copy.deepcopy(o)) for g, o in _manage_opts] - - -def list_artifacts_opts(): - """Return a list of oslo_config options available in Glance artifacts""" - return [(g, 
copy.deepcopy(o)) for g, o in _artifacts_opts] diff --git a/glance/tests/functional/db/base_glare.py b/glance/tests/functional/db/base_glare.py deleted file mode 100644 index de72e9a4ee..0000000000 --- a/glance/tests/functional/db/base_glare.py +++ /dev/null @@ -1,907 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -import six -from six.moves import range - -from glance.common import exception as exc -from glance import context -import glance.glare as ga -import glance.tests.functional.db as db_tests -from glance.tests import utils as test_utils - - -UUID1, UUID2 = ('80cc6551-9db4-42aa-bb58-51c48757f285', - 'f89c675a-e01c-436c-a384-7d2e784fb2d9') -TYPE_NAME = u'TestArtifactType' -TYPE_VERSION = u'1.0.0' - - -class ArtifactsTestDriver(test_utils.BaseTestCase): - def setUp(self): - super(ArtifactsTestDriver, self).setUp() - context_cls = context.RequestContext - self.adm_context = context_cls(is_admin=True, - auth_token='user:user:admin', - tenant='admin-tenant') - self.context = context_cls(is_admin=False, - auth_token='user:user:user', - tenant='test-tenant') - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - self.create_test_artifacts() - - def create_test_artifacts(self): - dependency = {'2->1': [UUID1]} - self.db_api.artifact_create(self.adm_context, - get_fixture(id=UUID1, - name="TestArtifact1", - visibility="public"), - TYPE_NAME, - TYPE_VERSION) - self.db_api.artifact_create(self.adm_context, - get_fixture(id=UUID2, - name="TestArtifact2", - visibility="public", - dependencies=dependency), - TYPE_NAME, - TYPE_VERSION) - self.art1 = self.db_api.artifact_get(self.context, UUID1, TYPE_NAME, - TYPE_VERSION) - self.art2 = self.db_api.artifact_get(self.context, UUID2, TYPE_NAME, - TYPE_VERSION) - - -class ArtifactTests(object): - def test_artifact_create(self): - artifact = get_fixture() - created = self.db_api.artifact_create(self.context, artifact, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(created) - self.assertEqual(artifact['name'], created['name']) - self.assertEqual(artifact['type_name'], created['type_name']) - self.assertEqual(artifact['type_version'], created['type_version']) - - def test_artifact_create_none_valued_props(self): - artifact = get_fixture() - artifact['properties']['lylyly'] = dict(value=None, type='int') - artifact['properties']['hihihi'] = dict(value=5, type='int') - created = self.db_api.artifact_create(self.context, artifact, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(created) - self.assertIn('hihihi', created['properties']) - self.assertNotIn('lylyly', created['properties']) - - def test_artifact_update(self): - fixture = {'name': 'UpdatedName'} - updated = self.db_api.artifact_update(self.context, fixture, UUID1, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(updated) - self.assertEqual('UpdatedName', updated['name']) - self.assertNotEqual(updated['created_at'], updated['updated_at']) - - def test_artifact_create_same_version_different_users(self): - 
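# two different tenants may each own an artifact with the same name - # and version, so both of the creations below must succeed - 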
tenant1 = str(uuid.uuid4()) - tenant2 = str(uuid.uuid4()) - ctx1 = context.RequestContext(is_admin=False, tenant=tenant1) - ctx2 = context.RequestContext(is_admin=False, tenant=tenant2) - artifact1 = get_fixture(owner=tenant1) - artifact2 = get_fixture(owner=tenant2) - self.db_api.artifact_create(ctx1, artifact1, - TYPE_NAME, TYPE_VERSION) - - self.assertIsNotNone( - self.db_api.artifact_create(ctx2, artifact2, - TYPE_NAME, TYPE_VERSION)) - - def test_artifact_create_same_version_deleted(self): - artifact1 = get_fixture() - artifact2 = get_fixture(state='deleted') - artifact3 = get_fixture(state='deleted') - self.db_api.artifact_create(self.context, artifact1, - TYPE_NAME, TYPE_VERSION) - - self.assertIsNotNone( - self.db_api.artifact_create(self.context, artifact2, - TYPE_NAME, TYPE_VERSION)) - self.assertIsNotNone( - self.db_api.artifact_create(self.context, artifact3, - TYPE_NAME, TYPE_VERSION)) - - def test_artifact_get(self): - res = self.db_api.artifact_get(self.context, UUID1, - TYPE_NAME, TYPE_VERSION) - self.assertEqual('TestArtifact1', res['name']) - self.assertEqual('TestArtifactType', res['type_name']) - self.assertEqual('1.0.0', res['type_version']) - self.assertEqual('10.0.3-alpha+some-date', res['version']) - self.assertEqual('creating', res['state']) - self.assertEqual('test-tenant', res['owner']) - - def test_artifact_get_owned(self): - tenant1 = str(uuid.uuid4()) - tenant2 = str(uuid.uuid4()) - ctx1 = context.RequestContext(is_admin=False, tenant=tenant1) - ctx2 = context.RequestContext(is_admin=False, tenant=tenant2) - - artifact = get_fixture(owner=tenant1) - created = self.db_api.artifact_create(ctx1, artifact, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(self.db_api.artifact_get(ctx1, created['id'], - TYPE_NAME, TYPE_VERSION)) - self.assertRaises(exc.ArtifactForbidden, self.db_api.artifact_get, - ctx2, created['id'], TYPE_NAME, TYPE_VERSION) - - def test_artifact_get_public(self): - tenant1 = str(uuid.uuid4()) - tenant2 = str(uuid.uuid4()) - ctx1 = context.RequestContext(is_admin=False, tenant=tenant1) - ctx2 = context.RequestContext(is_admin=False, tenant=tenant2) - - artifact = get_fixture(owner=tenant1, visibility='public') - created = self.db_api.artifact_create(ctx1, artifact, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(self.db_api.artifact_get(ctx1, created['id'], - TYPE_NAME, TYPE_VERSION)) - self.assertIsNotNone(self.db_api.artifact_get(ctx2, created['id'], - TYPE_NAME, TYPE_VERSION)) - - def test_artifact_update_state(self): - res = self.db_api.artifact_update(self.context, {'state': 'active'}, - UUID1, TYPE_NAME, TYPE_VERSION) - self.assertEqual('active', res['state']) - - self.assertRaises(exc.InvalidArtifactStateTransition, - self.db_api.artifact_update, self.context, - {'state': 'creating'}, UUID1, - TYPE_NAME, TYPE_VERSION) - - res = self.db_api.artifact_update(self.context, - {'state': 'deactivated'}, UUID1, - TYPE_NAME, TYPE_VERSION) - self.assertEqual('deactivated', res['state']) - res = self.db_api.artifact_update(self.context, {'state': 'active'}, - UUID1, TYPE_NAME, TYPE_VERSION) - self.assertEqual('active', res['state']) - res = self.db_api.artifact_update(self.context, {'state': 'deleted'}, - UUID1, TYPE_NAME, TYPE_VERSION) - self.assertEqual('deleted', res['state']) - - self.assertRaises(exc.InvalidArtifactStateTransition, - self.db_api.artifact_update, self.context, - {'state': 'active'}, UUID1, - TYPE_NAME, TYPE_VERSION) - self.assertRaises(exc.InvalidArtifactStateTransition, - self.db_api.artifact_update, self.context, - {'state': 
'deactivated'}, UUID1, - TYPE_NAME, TYPE_VERSION) - self.assertRaises(exc.InvalidArtifactStateTransition, - self.db_api.artifact_update, self.context, - {'state': 'creating'}, UUID1, - TYPE_NAME, TYPE_VERSION) - - def test_artifact_update_tags(self): - res = self.db_api.artifact_update(self.context, - {'tags': ['gagaga', 'lalala']}, - UUID1, TYPE_NAME, TYPE_VERSION) - self.assertEqual(set(['gagaga', 'lalala']), set(res['tags'])) - - def test_artifact_update_properties(self): - new_properties = {'properties': { - 'propname1': { - 'type': 'string', - 'value': 'qeqeqe'}, - 'propname2': { - 'type': 'int', - 'value': 6}, - 'propname3': { - 'type': 'int', - 'value': '5'}, - 'proparray': { - 'type': 'string', - 'value': 'notarray' - }} - } - res = self.db_api.artifact_update(self.context, - new_properties, - UUID1, TYPE_NAME, TYPE_VERSION) - bd_properties = res['properties'] - self.assertEqual(4, len(bd_properties)) - - for prop in bd_properties: - self.assertIn(prop, new_properties['properties']) - - def test_artifact_update_blobs(self): - new_blobs = {'blobs': { - 'blob1': [{ - 'size': 2600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 200000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'newURL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'passive'}] - } - ], - 'blob2': [{ - 'size': 120000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - }, { - 'size': 300000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'bl1URL2', - 'status': 'passive'}] - } - ] - } - - } - res = self.db_api.artifact_update(self.context, - new_blobs, - UUID1, TYPE_NAME, TYPE_VERSION) - bd_blobs = res['blobs'] - self.assertEqual(2, len(bd_blobs)) - for blob in bd_blobs: - self.assertIn(blob, new_blobs['blobs']) - - def test_artifact_create_with_dependency(self): - dependencies = {"new->2": [UUID2]} - artifact = get_fixture(dependencies=dependencies) - res = self.db_api.artifact_create(self.context, artifact, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(res) - - created = self.db_api.artifact_get( - self.context, res['id'], TYPE_NAME, TYPE_VERSION, - show_level=ga.Showlevel.DIRECT) - bd_dependencies = created['dependencies'] - self.assertEqual(1, len(bd_dependencies)) - # now try to update artifact with the same dependency - new_dependencies = {"dependencies": {"new->2": [UUID2], - "new->3": [UUID2]}} - res = self.db_api.artifact_update(self.context, - new_dependencies, - UUID1, TYPE_NAME, TYPE_VERSION) - retrieved = self.db_api.artifact_get( - self.context, res['id'], - TYPE_NAME, TYPE_VERSION, show_level=ga.Showlevel.DIRECT) - self.assertEqual(2, len(retrieved["dependencies"])) - - def test_artifact_create_transitive_dependencies(self): - dependencies = {"new->2": [UUID2]} - artifact = get_fixture(dependencies=dependencies, id='new') - res = self.db_api.artifact_create(self.context, artifact, - TYPE_NAME, TYPE_VERSION) - self.assertIsNotNone(res) - - created = self.db_api.artifact_get( - self.context, res['id'], TYPE_NAME, TYPE_VERSION, - show_level=ga.Showlevel.DIRECT) - bd_dependencies = created['dependencies'] - self.assertEqual(1, len(bd_dependencies)) - - res = self.db_api.artifact_publish( - self.context, - res['id'], TYPE_NAME, TYPE_VERSION - ) - - res = self.db_api.artifact_get( - 
self.context, res['id'], TYPE_NAME, TYPE_VERSION, - show_level=ga.Showlevel.TRANSITIVE) - self.assertIsNotNone(res.pop('created_at')) - self.assertIsNotNone(res.pop('updated_at')) - - # NOTE(mfedosin): tags is a set, so we have to check it separately - tags = res.pop('tags', None) - self.assertIsNotNone(tags) - self.assertEqual(set(['gugugu', 'lalala']), set(tags)) - - tags = res['dependencies']['new->2'][0].pop('tags', None) - self.assertIsNotNone(tags) - self.assertEqual(set(['gugugu', 'lalala']), set(tags)) - - tags = (res['dependencies']['new->2'][0]['dependencies']['2->1'][0]. - pop('tags', None)) - self.assertIsNotNone(tags) - self.assertEqual(set(['gugugu', 'lalala']), set(tags)) - - expected = { - 'id': 'new', - 'name': u'SomeArtifact', - 'description': None, - 'type_name': TYPE_NAME, - 'type_version': TYPE_VERSION, - 'version': u'10.0.3-alpha+some-date', - 'visibility': u'private', - 'state': u'active', - 'owner': u'test-tenant', - 'published_at': None, - 'deleted_at': None, - 'properties': { - 'propname1': { - 'type': 'string', - 'value': 'tututu'}, - 'propname2': { - 'type': 'int', - 'value': 5}, - 'propname3': { - 'type': 'string', - 'value': 'vavava'}, - 'proparray': { - 'type': 'array', - 'value': [ - {'type': 'int', - 'value': 6}, - {'type': 'string', - 'value': 'rerere'} - ] - } - }, - 'blobs': { - 'blob1': [{ - 'size': 1600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 100000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - }] - }, - 'dependencies': { - 'new->2': [ - { - 'id': UUID2, - 'created_at': self.art2['created_at'], - 'updated_at': self.art2['updated_at'], - 'published_at': None, - 'deleted_at': None, - 'name': u'TestArtifact2', - 'description': None, - 'type_name': TYPE_NAME, - 'type_version': TYPE_VERSION, - 'version': u'10.0.3-alpha+some-date', - 'visibility': 'public', - 'state': u'creating', - 'owner': u'test-tenant', - 'properties': { - 'propname1': { - 'type': 'string', - 'value': 'tututu'}, - 'propname2': { - 'type': 'int', - 'value': 5}, - 'propname3': { - 'type': 'string', - 'value': 'vavava'}, - 'proparray': { - 'type': 'array', - 'value': [ - {'type': 'int', - 'value': 6}, - {'type': 'string', - 'value': 'rerere'} - ] - } - }, - 'blobs': { - 'blob1': [{ - 'size': 1600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 100000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - }] - }, - 'dependencies': { - '2->1': [ - { - 'id': UUID1, - 'created_at': self.art1['created_at'], - 'updated_at': self.art1['updated_at'], - 'published_at': None, - 'deleted_at': None, - 'dependencies': {}, - 'name': u'TestArtifact1', - 'description': None, - 'type_name': TYPE_NAME, - 'type_version': TYPE_VERSION, - 'version': u'10.0.3-alpha+some-date', - 'visibility': 'public', - 'state': u'creating', - 'owner': u'test-tenant', - 'properties': { - 'propname1': { - 'type': 'string', - 'value': 'tututu'}, - 'propname2': { - 'type': 'int', - 'value': 5}, - 'propname3': { - 'type': 'string', - 'value': 'vavava'}, - 'proparray': { - 'type': 'array', - 'value': [ - {'type': 'int', - 'value': 6}, - {'type': 'string', - 'value': 'rerere'} - ] - } - }, - 
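# NOTE: this innermost 'blobs' fixture mirrors get_fixture(); the - # dependencies nest two levels (new -> 2 -> 1) so the TRANSITIVE - # show level check covers nested dependencies - 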
'blobs': { - 'blob1': [{ - 'size': 1600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 100000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - }] - } - } - ] - } - } - ] - } - } - self.assertIsNotNone(res['published_at']) - published_at = res['published_at'] - expected['published_at'] = published_at - for key, value in six.iteritems(expected): - self.assertEqual(expected[key], res[key]) - - def test_artifact_get_all(self): - artifact = get_fixture(name='new_artifact') - self.db_api.artifact_create(self.context, artifact, - TYPE_NAME, TYPE_VERSION) - artifacts = self.db_api.artifact_get_all(self.context) - self.assertEqual(3, len(artifacts)) - - def test_artifact_sort_order(self): - arts = [get_fixture(version='1.2.3-alpha.4.df.00f'), - get_fixture(version='1.2.2'), - get_fixture(version='1.2.3+some-metadata'), - get_fixture(version='1.2.4'), - get_fixture(version='1.2.3-release.2'), - get_fixture(version='1.2.3-release.1+metadata'), - get_fixture(version='1.2.3-final'), - get_fixture(version='1.2.3-alpha.14.df.00f')] - for art in arts: - self.db_api.artifact_create(self.context, art, TYPE_NAME, - TYPE_VERSION) - artifacts = self.db_api.artifact_get_all(self.context, - sort_keys=[('version', - None)], - sort_dirs=['asc']) - - expected_versions = [ - '1.2.2', - '1.2.3-alpha.4.df.00f', - '1.2.3-alpha.14.df.00f', - '1.2.3-final', - '1.2.3-release.1+metadata', - '1.2.3-release.2', - '1.2.3+some-metadata', - '1.2.4'] - for i in range(len(expected_versions)): - self.assertEqual(expected_versions[i], artifacts[i]['version']) - - def test_artifact_get_all_show_level(self): - artifacts = self.db_api.artifact_get_all(self.context) - self.assertEqual(2, len(artifacts)) - - self.assertRaises(KeyError, lambda: artifacts[0]['properties']) - - artifacts = self.db_api.artifact_get_all( - self.context, show_level=ga.Showlevel.BASIC) - self.assertEqual(2, len(artifacts)) - self.assertEqual(4, len(artifacts[0]['properties'])) - - self.assertRaises(exc.ArtifactUnsupportedShowLevel, - self.db_api.artifact_get_all, self.context, - show_level=ga.Showlevel.DIRECT) - - def test_artifact_get_all_tags(self): - artifact = get_fixture(name='new_artifact', - tags=['qwerty', 'uiop']) - self.db_api.artifact_create(self.context, artifact, - TYPE_NAME, TYPE_VERSION) - artifacts = self.db_api.artifact_get_all(self.context) - self.assertEqual(3, len(artifacts)) - - filters = {'tags': [{ - 'value': 'notag', - }]} - artifacts = self.db_api.artifact_get_all(self.context, filters=filters) - self.assertEqual(0, len(artifacts)) - - filters = {'tags': [{ - 'value': 'lalala', - }]} - artifacts = self.db_api.artifact_get_all(self.context, filters=filters) - self.assertEqual(2, len(artifacts)) - for artifact in artifacts: - self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) - - def test_artifact_get_all_properties(self): - artifact = get_fixture( - name='new_artifact', - properties={ - 'newprop2': { - 'type': 'string', - 'value': 'tututu'}, - 'propname2': { - 'type': 'int', - 'value': 3}, - 'propname3': { - 'type': 'string', - 'value': 'vavava'}, - 'proptext': { - 'type': 'text', - 'value': 'bebebe' * 100}, - 'proparray': { - 'type': 'array', - 'value': [ - {'type': 'int', - 'value': 17}, - {'type': 'string', - 'value': 'rerere'} - ] - }}) - self.db_api.artifact_create(self.context, artifact, - 
TYPE_NAME, TYPE_VERSION) - - filters = {'propname2': [{ - 'value': 4, - 'operator': 'GT', - 'type': 'int'}]} - artifacts = self.db_api.artifact_get_all(self.context, filters=filters) - self.assertEqual(2, len(artifacts)) - for artifact in artifacts: - self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) - - # position hasn't been set - filters = {'proparray': [{ - 'value': 6, - 'operator': 'LE', - 'type': 'int'}]} - artifacts = self.db_api.artifact_get_all(self.context, filters=filters) - self.assertEqual(0, len(artifacts)) - for artifact in artifacts: - self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) - - # position has been set - filters = {'proparray': [{ - 'value': 6, - 'position': 0, - 'operator': 'LE', - 'type': 'int'}]} - artifacts = self.db_api.artifact_get_all(self.context, filters=filters) - self.assertEqual(2, len(artifacts)) - for artifact in artifacts: - self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) - - filters = {'proparray': [{ - 'value': 6, - 'operator': 'IN', - 'type': 'int'}]} - artifacts = self.db_api.artifact_get_all(self.context, filters=filters) - self.assertEqual(2, len(artifacts)) - for artifact in artifacts: - self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) - - filters = {'name': [{'value': 'new_artifact'}]} - artifacts = self.db_api.artifact_get_all(self.context, - filters=filters, - show_level=ga.Showlevel.BASIC) - self.assertEqual(1, len(artifacts)) - artifact = artifacts[0] - self.assertEqual('new_artifact', artifact['name']) - for prop in artifact['properties'].keys(): - self.assertNotEqual('proptext', prop) - - filters = {'propname2': [{ - 'value': 4, - 'operator': 'FOO', - 'type': 'int'}]} - self.assertRaises( - exc.ArtifactUnsupportedPropertyOperator, - self.db_api.artifact_get_all, self.context, filters=filters) - - def test_artifact_delete(self): - res = self.db_api.artifact_delete(self.context, UUID1, - TYPE_NAME, TYPE_VERSION) - self.assertEqual('TestArtifact1', res['name']) - self.assertEqual('deleted', res['state']) - self.assertIsNotNone(res['deleted_at']) - - artifacts = self.db_api.artifact_get_all(self.context) - self.assertEqual(1, len(artifacts)) - - def test_artifact_delete_property(self): - - new_properties = {'properties': { - 'proparray': {'value': [], - 'type': 'array'} - } - } - res = self.db_api.artifact_update(self.context, - new_properties, - UUID1, TYPE_NAME, TYPE_VERSION) - bd_properties = res['properties'] - self.assertEqual(3, len(bd_properties)) - - expected = { - 'propname1': { - 'type': 'string', - 'value': 'tututu'}, - 'propname2': { - 'type': 'int', - 'value': 5}, - 'propname3': { - 'type': 'string', - 'value': 'vavava'} - } - - for prop in bd_properties: - self.assertIn(prop, expected) - - def test_artifact_delete_blob(self): - - new_blobs = {'blobs': { - 'blob2': [{ - 'size': 2600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 200000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'newURL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'passive'}] - } - ], - 'blob3': [{ - 'size': 120000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - }, { - 'size': 300000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'bl1URL2', - 'status': 
'passive'}] - } - ] - } - - } - - expected = {'blobs': { - 'blob1': [{ - 'size': 1600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 100000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - } - ], - 'blob2': [{ - 'size': 2600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 200000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'newURL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'passive'}] - } - ], - 'blob3': [{ - 'size': 120000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - }, { - 'size': 300000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'bl1URL2', - 'status': 'passive'}] - } - ] - } - - } - - res = self.db_api.artifact_update(self.context, - new_blobs, - UUID1, TYPE_NAME, TYPE_VERSION) - bd_blobs = res['blobs'] - self.assertEqual(3, len(bd_blobs)) - for blob in bd_blobs: - self.assertIn(blob, expected['blobs']) - - del_blobs = {'blobs': { - 'blob1': []} - } - - res = self.db_api.artifact_update(self.context, - del_blobs, - UUID1, TYPE_NAME, TYPE_VERSION) - bd_blobs = res['blobs'] - self.assertEqual(2, len(bd_blobs)) - - for blob in bd_blobs: - self.assertIn(blob, new_blobs['blobs']) - - -def get_fixture(**kwargs): - artifact = { - 'name': u'SomeArtifact', - 'type_name': TYPE_NAME, - 'type_version': TYPE_VERSION, - 'version': u'10.0.3-alpha+some-date', - 'visibility': u'private', - 'state': u'creating', - 'owner': u'test-tenant', - 'tags': ['lalala', 'gugugu'], - 'properties': { - 'propname1': { - 'type': 'string', - 'value': 'tututu'}, - 'propname2': { - 'type': 'int', - 'value': 5}, - 'propname3': { - 'type': 'string', - 'value': 'vavava'}, - 'proparray': { - 'type': 'array', - 'value': [ - {'type': 'int', - 'value': 6}, - {'type': 'string', - 'value': 'rerere'} - ] - } - }, - 'blobs': { - 'blob1': [{ - 'size': 1600000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL11', - 'status': 'active'}, - {'value': 'URL12', - 'status': 'active'}] - }, { - 'size': 100000, - 'checksum': 'abc', - 'item_key': 'some', - 'locations': [ - {'value': 'URL21', - 'status': 'active'}, - {'value': 'URL22', - 'status': 'active'}] - } - ] - } - } - - artifact.update(kwargs) - return artifact diff --git a/glance/tests/functional/db/migrations/test_pike01.py b/glance/tests/functional/db/migrations/test_pike01.py new file mode 100644 index 0000000000..fcb6db4605 --- /dev/null +++ b/glance/tests/functional/db/migrations/test_pike01.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
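+ +# These tests assert that the pike01 migration drops every artifact +# table. A minimal standalone sketch of the same check, assuming an +# already-migrated SQLAlchemy engine object named 'engine': +# +#     import sqlalchemy +#     assert 'artifacts' not in sqlalchemy.inspect(engine).get_table_names()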
+ +from oslo_db.sqlalchemy import test_base +from oslo_db.sqlalchemy import utils as db_utils +import sqlalchemy + +from glance.tests.functional.db import test_migrations + + +class TestPike01Mixin(test_migrations.AlembicMigrationsMixin): + + artifacts_table_names = [ + 'artifact_blob_locations', + 'artifact_properties', + 'artifact_blobs', + 'artifact_dependencies', + 'artifact_tags', + 'artifacts' + ] + + def _pre_upgrade_pike01(self, engine): + # verify presence of the artifacts tables + for table_name in self.artifacts_table_names: + table = db_utils.get_table(engine, table_name) + self.assertIsNotNone(table) + + def _check_pike01(self, engine, data): + # verify absence of the artifacts tables + for table_name in self.artifacts_table_names: + self.assertRaises(sqlalchemy.exc.NoSuchTableError, + db_utils.get_table, engine, table_name) + + +class TestPike01MySQL(TestPike01Mixin, test_base.MySQLOpportunisticTestCase): + pass + + +class TestPike01PostgresSQL(TestPike01Mixin, + test_base.PostgreSQLOpportunisticTestCase): + pass + + +class TestPike01Sqlite(TestPike01Mixin, test_base.DbTestCase): + pass diff --git a/glance/tests/functional/db/migrations/test_pike_contract01.py b/glance/tests/functional/db/migrations/test_pike_contract01.py new file mode 100644 index 0000000000..3a04f56a02 --- /dev/null +++ b/glance/tests/functional/db/migrations/test_pike_contract01.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_db.sqlalchemy import test_base +from oslo_db.sqlalchemy import utils as db_utils +import sqlalchemy + +from glance.tests.functional.db import test_migrations + + +class TestPikeContract01Mixin(test_migrations.AlembicMigrationsMixin): + + artifacts_table_names = [ + 'artifact_blob_locations', + 'artifact_properties', + 'artifact_blobs', + 'artifact_dependencies', + 'artifact_tags', + 'artifacts' + ] + + def _get_revisions(self, config): + return test_migrations.AlembicMigrationsMixin._get_revisions( + self, config, head='pike_contract01') + + def _pre_upgrade_pike_contract01(self, engine): + # verify presence of the artifacts tables + for table_name in self.artifacts_table_names: + table = db_utils.get_table(engine, table_name) + self.assertIsNotNone(table) + + def _check_pike_contract01(self, engine, data): + # verify absence of the artifacts tables + for table_name in self.artifacts_table_names: + self.assertRaises(sqlalchemy.exc.NoSuchTableError, + db_utils.get_table, engine, table_name) + + +class TestPikeContract01MySQL(TestPikeContract01Mixin, + test_base.MySQLOpportunisticTestCase): + pass diff --git a/glance/tests/functional/db/migrations/test_pike_expand01.py b/glance/tests/functional/db/migrations/test_pike_expand01.py new file mode 100644 index 0000000000..2ad4481a76 --- /dev/null +++ b/glance/tests/functional/db/migrations/test_pike_expand01.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_db.sqlalchemy import test_base +from oslo_db.sqlalchemy import utils as db_utils + +from glance.tests.functional.db import test_migrations + + +class TestPikeExpand01Mixin(test_migrations.AlembicMigrationsMixin): + + artifacts_table_names = [ + 'artifact_blob_locations', + 'artifact_properties', + 'artifact_blobs', + 'artifact_dependencies', + 'artifact_tags', + 'artifacts' + ] + + def _get_revisions(self, config): + return test_migrations.AlembicMigrationsMixin._get_revisions( + self, config, head='pike_expand01') + + def _pre_upgrade_pike_expand01(self, engine): + # verify presence of the artifacts tables + for table_name in self.artifacts_table_names: + table = db_utils.get_table(engine, table_name) + self.assertIsNotNone(table) + + def _check_pike_expand01(self, engine, data): + # should be no changes, so re-run pre-upgrade check + self._pre_upgrade_pike_expand01(engine) + + +class TestPikeExpand01MySQL(TestPikeExpand01Mixin, + test_base.MySQLOpportunisticTestCase): + pass diff --git a/glance/contrib/plugins/image_artifact/setup.py b/glance/tests/functional/db/migrations/test_pike_migrate01.py similarity index 63% rename from glance/contrib/plugins/image_artifact/setup.py rename to glance/tests/functional/db/migrations/test_pike_migrate01.py index 2a3ea51e79..ee13b7c0ab 100644 --- a/glance/contrib/plugins/image_artifact/setup.py +++ b/glance/tests/functional/db/migrations/test_pike_migrate01.py @@ -1,6 +1,3 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,8 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. 
-import setuptools +from oslo_db.sqlalchemy import test_base -# all other params will be taken from setup.cfg -setuptools.setup(packages=setuptools.find_packages(), - setup_requires=['pbr'], pbr=True) +import glance.tests.functional.db.migrations.test_pike_expand01 as tpe01 + + +# no TestPikeMigrate01Mixin class needed, can use TestPikeExpand01Mixin instead + + +class TestPikeMigrate01MySQL(tpe01.TestPikeExpand01Mixin, + test_base.MySQLOpportunisticTestCase): + pass diff --git a/glance/tests/functional/db/test_migrations.py b/glance/tests/functional/db/test_migrations.py index c364a11c5a..e7d51a2794 100644 --- a/glance/tests/functional/db/test_migrations.py +++ b/glance/tests/functional/db/test_migrations.py @@ -27,7 +27,6 @@ from glance.db import migration as db_migration from glance.db.sqlalchemy import alembic_migrations from glance.db.sqlalchemy.alembic_migrations import versions from glance.db.sqlalchemy import models -from glance.db.sqlalchemy import models_glare from glance.db.sqlalchemy import models_metadef import glance.tests.utils as test_utils @@ -120,8 +119,6 @@ class ModelsMigrationSyncMixin(object): def get_metadata(self): for table in models_metadef.BASE_DICT.metadata.sorted_tables: models.BASE.metadata._add_table(table.name, table.schema, table) - for table in models_glare.BASE.metadata.sorted_tables: - models.BASE.metadata._add_table(table.name, table.schema, table) return models.BASE.metadata def get_engine(self): diff --git a/glance/tests/functional/db/test_sqlalchemy.py b/glance/tests/functional/db/test_sqlalchemy.py index 20575d14e2..57070b2b05 100644 --- a/glance/tests/functional/db/test_sqlalchemy.py +++ b/glance/tests/functional/db/test_sqlalchemy.py @@ -20,11 +20,9 @@ from oslo_db import options from glance.common import exception import glance.db.sqlalchemy.api from glance.db.sqlalchemy import models as db_models -from glance.db.sqlalchemy import models_glare as artifact_models from glance.db.sqlalchemy import models_metadef as metadef_models import glance.tests.functional.db as db_tests from glance.tests.functional.db import base -from glance.tests.functional.db import base_glare from glance.tests.functional.db import base_metadef CONF = cfg.CONF @@ -47,11 +45,6 @@ def reset_db_metadef(db_api): metadef_models.register_models(db_api.get_engine()) -def reset_db_artifacts(db_api): - artifact_models.unregister_models(db_api.get_engine()) - artifact_models.register_models(db_api.get_engine()) - - class TestSqlAlchemyDriver(base.TestDriver, base.DriverTests, base.FunctionalInitWrapper): @@ -169,14 +162,6 @@ class TestDBPurge(base.DBPurgeTests, self.addCleanup(db_tests.reset) -class TestArtifacts(base_glare.ArtifactsTestDriver, - base_glare.ArtifactTests): - def setUp(self): - db_tests.load(get_db, reset_db_artifacts) - super(TestArtifacts, self).setUp() - self.addCleanup(db_tests.reset) - - class TestMetadefSqlAlchemyDriver(base_metadef.TestMetadefDriver, base_metadef.MetadefDriverTests, base.FunctionalInitWrapper): diff --git a/glance/tests/functional/glare/__init__.py b/glance/tests/functional/glare/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/glance/tests/functional/glare/test_glare.py b/glance/tests/functional/glare/test_glare.py deleted file mode 100644 index 56dd8895b7..0000000000 --- a/glance/tests/functional/glare/test_glare.py +++ /dev/null @@ -1,2016 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest -import uuid - -import mock -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http -from stevedore import extension - -from glance.api.glare.v0_1 import glare -from glance.api.glare.v0_1 import router -from glance.common.glare import definitions -from glance.common.glare import loader -from glance.common import wsgi -from glance.tests import functional - - -class Artifact(definitions.ArtifactType): - __type_name__ = "WithProps" - prop1 = definitions.String() - prop2 = definitions.Integer() - prop_list = definitions.Array(item_type=definitions.Integer()) - tuple_prop = definitions.Array(item_type=[definitions.Integer(), - definitions.Boolean()]) - dict_prop = definitions.Dict(properties={ - "foo": definitions.String(), - "bar_list": definitions.Array(definitions.Integer())}) - dict_prop_strval = definitions.Dict(properties=definitions.String()) - depends_on = definitions.ArtifactReference() - depends_on_list = definitions.ArtifactReferenceList() - - -class ArtifactNoProps(definitions.ArtifactType): - __type_name__ = "NoProp" - - -class ArtifactNoProps1(definitions.ArtifactType): - __type_name__ = "NoProp" - __type_version__ = "0.5" - - -class ArtifactWithBlob(definitions.ArtifactType): - __type_name__ = "WithBlob" - blob1 = definitions.BinaryObject() - blob_list = definitions.BinaryObjectList() - - -def _create_resource(): - test_loader = extension.ExtensionManager.make_test_instance( - extensions=[ - extension.Extension( - name='WithProps', - entry_point=mock.Mock(), - plugin=Artifact, - obj=None, - ), - extension.Extension( - name='NoProp', - entry_point=mock.Mock(), - plugin=ArtifactNoProps, - obj=None, - ), - extension.Extension( - name='NoProp', - entry_point=mock.Mock(), - plugin=ArtifactNoProps1, - obj=None, - ), - extension.Extension( - name='WithBlob', - entry_point=mock.Mock(), - plugin=ArtifactWithBlob, - obj=None, - ), - ], - ) - plugins = loader.ArtifactsPluginLoader( - 'glance.artifacts.types', - test_plugins=test_loader, - ) - deserializer = glare.RequestDeserializer(plugins=plugins) - serializer = glare.ResponseSerializer() - controller = glare.ArtifactsController(plugins=plugins) - return wsgi.Resource(controller, deserializer, serializer) - - -class TestRouter(router.API): - def _get_artifacts_resource(self): - return _create_resource() - - -class TestArtifacts(functional.FunctionalTest): - - users = { - 'user1': { - 'id': str(uuid.uuid4()), - 'tenant_id': str(uuid.uuid4()), - 'token': str(uuid.uuid4()), - 'role': 'member' - }, - 'user2': { - 'id': str(uuid.uuid4()), - 'tenant_id': str(uuid.uuid4()), - 'token': str(uuid.uuid4()), - 'role': 'member' - }, - 'admin': { - 'id': str(uuid.uuid4()), - 'tenant_id': str(uuid.uuid4()), - 'token': str(uuid.uuid4()), - 'role': 'admin' - } - } - - def setUp(self): - super(TestArtifacts, self).setUp() - self._set_user('user1') - self.api_server.server_name = 'glare' - self.api_server.server_module = 'glance.cmd.glare' - 
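# run the standalone Glare server (glance.cmd.glare) with the - # unauthenticated 'noauth' paste flavor defined in start_servers() - 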
self.api_server.deployment_flavor = 'noauth' - self.start_servers(**self.__dict__.copy()) - - def tearDown(self): - self.stop_servers() - self._reset_database(self.api_server.sql_connection) - super(TestArtifacts, self).tearDown() - - def _url(self, path): - return 'http://127.0.0.1:%d/v0.1/artifacts%s' % (self.api_port, path) - - def _set_user(self, username): - if username not in self.users: - raise KeyError - self.current_user = username - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': self.users[self.current_user]['token'], - 'X-User-Id': self.users[self.current_user]['id'], - 'X-Tenant-Id': self.users[self.current_user]['tenant_id'], - 'X-Roles': self.users[self.current_user]['role'], - } - base_headers.update(custom_headers or {}) - return base_headers - - def start_servers(self, **kwargs): - # noqa - new_paste_conf_base = """[pipeline:glare-api] -pipeline = versionnegotiation unauthenticated-context rootapp - -[pipeline:glare-api-fakeauth] -pipeline = versionnegotiation fakeauth context rootapp - -[pipeline:glare-api-noauth] -pipeline = versionnegotiation context rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v0.1: glareapi - -[app:apiversions] -paste.app_factory = glance.api.glare.versions:create_resource - -[app:glareapi] -paste.app_factory = - glance.tests.functional.glare.test_glare:TestRouter.factory - -[filter:versionnegotiation] -paste.filter_factory = - glance.api.middleware.version_negotiation: - GlareVersionNegotiationFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory -""" - self.cleanup() - self.api_server.paste_conf_base = new_paste_conf_base - super(TestArtifacts, self).start_servers(**kwargs) - - def _create_artifact(self, type_name, type_version='1.0', data=None, - status=http.CREATED): - # create an artifact first - artifact_data = data or {'name': 'artifact-1', - 'version': '12'} - return self._check_artifact_post('/%s/v%s/drafts' % (type_name, - type_version), - artifact_data, status=status) - - def _check_artifact_method(self, method, url, data=None, status=http.OK, - headers=None): - if not headers: - headers = self._headers() - else: - headers = self._headers(headers) - headers.setdefault("Content-Type", "application/json") - if 'application/json' in headers['Content-Type']: - data = jsonutils.dumps(data) - response = getattr(requests, method)(self._url(url), headers=headers, - data=data) - self.assertEqual(status, response.status_code) - if status >= http.BAD_REQUEST: - return response.text - if "application/json" in response.headers["content-type"]: - return jsonutils.loads(response.text) - return response.text - - def _check_artifact_post(self, url, data, status=http.CREATED, - headers=None): - if headers is None: - headers = {'Content-Type': 'application/json'} - - return self._check_artifact_method("post", url, data, status=status, - headers=headers) - - def _check_artifact_get(self, url, status=http.OK): - return self._check_artifact_method("get", url, status=status) - - def _check_artifact_delete(self, url, status=http.NO_CONTENT): - response = requests.delete(self._url(url), headers=self._headers()) - self.assertEqual(status, response.status_code) 
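- # the default expectation for DELETE is 204 No Content with an empty body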
- return response.text - - def _check_artifact_patch(self, url, data, status=http.OK, - headers={'Content-Type': 'application/json'}): - return self._check_artifact_method("patch", url, data, status=status, - headers=headers) - - def _check_artifact_put(self, url, data, status=http.OK, - headers={'Content-Type': 'application/json'}): - return self._check_artifact_method("put", url, data, status=status, - headers=headers) - - def test_list_any_artifacts(self): - """Returns information about all draft artifacts for the given endpoint""" - self._create_artifact('noprop') - artifacts = self._check_artifact_get('/noprop/drafts')["artifacts"] - self.assertEqual(1, len(artifacts)) - - def test_list_last_version(self): - """/artifacts/endpoint == /artifacts/endpoint/all-versions""" - self._create_artifact('noprop') - artifacts = self._check_artifact_get('/noprop/drafts')["artifacts"] - self.assertEqual(1, len(artifacts)) - # the same result can be achieved by asking for the artifact with - # type_version=last version - artifacts_precise = self._check_artifact_get( - '/noprop/v1.0/drafts')["artifacts"] - self.assertEqual(artifacts, artifacts_precise) - - def test_list_artifacts_by_state(self): - """Returns the last version of artifacts with the given state""" - self._create_artifact('noprop') - creating_state = self._check_artifact_get( - '/noprop/drafts')["artifacts"] - self.assertEqual(1, len(creating_state)) - # no active [/type_name/active == /type_name] - active_state = self._check_artifact_get('/noprop')["artifacts"] - self.assertEqual(0, len(active_state)) - - def test_list_artifacts_with_version(self): - """Supplying a precise artifact version does not break anything""" - self._create_artifact('noprop') - list_creating = self._check_artifact_get( - '/noprop/v1.0/drafts')["artifacts"] - self.assertEqual(1, len(list_creating)) - bad_version = self._check_artifact_get('/noprop/v1.0bad', - status=http.BAD_REQUEST) - self.assertIn("Invalid version string: u'1.0bad'", bad_version) - - def test_list_artifacts_with_pagination(self): - """List artifacts with pagination""" - # create artifacts - art1 = {'name': 'artifact-1', - 'version': '12'} - art2 = {'name': 'artifact-2', - 'version': '12'} - self._create_artifact('noprop', data=art1) - self._create_artifact('noprop', data=art2) - # sorting is desc by default - first_page = self._check_artifact_get( - '/noprop/drafts?limit=1&sort=name') - # check that the first artifact has been returned correctly - self.assertEqual(1, len(first_page["artifacts"])) - self.assertEqual("artifact-2", first_page["artifacts"][0]["name"]) - self.assertIn("next", first_page) - # check the second page - second_page_url = first_page["next"].split("artifacts", 1)[1] - second_page = self._check_artifact_get(second_page_url) - self.assertIn("next", second_page) - self.assertEqual(1, len(second_page["artifacts"])) - self.assertEqual("artifact-1", second_page["artifacts"][0]["name"]) - # check that the last page is empty - last_page_url = second_page["next"].split("artifacts", 1)[1] - last_page = self._check_artifact_get(last_page_url) - self.assertEqual(0, len(last_page["artifacts"])) - self.assertNotIn("next", last_page) - - def test_get_artifact_by_id_any_version(self): - data = self._create_artifact('noprop') - artifact_id = data['id'] - artifacts = self._check_artifact_get( - '/noprop/%s' % artifact_id) - self.assertEqual(artifact_id, artifacts['id']) - - def test_list_artifact_no_such_version(self): - """Version filtering should be applied for existing plugins. 
- - An attempt to retrieve an artifact from an existing plugin, but with - a wrong version, should result in - 400 BadRequest 'No such plugin has been loaded' - """ - msg = self._check_artifact_get('/noprop/v0.0.9', http.BAD_REQUEST) - self.assertIn("No plugin for 'noprop v 0.0.9' has been loaded", - msg) - - def test_get_artifact_by_id(self): - data = self._create_artifact('noprop') - artifact_id = data['id'] - artifacts = self._check_artifact_get( - '/noprop/%s' % artifact_id) - self.assertEqual(artifact_id, artifacts['id']) - # the same result can be achieved by asking for the artifact with - # type_version=last version - artifacts_precise = self._check_artifact_get( - '/noprop/v1.0/%s' % artifact_id) - self.assertEqual(artifacts, artifacts_precise) - - def test_get_artifact_basic_show_level(self): - no_prop_art = self._create_artifact('noprop') - art = self._create_artifact( - 'withprops', - data={"name": "name", "version": "42", - "depends_on": no_prop_art['id']}) - self.assertEqual(no_prop_art['id'], art['depends_on']['id']) - self.assertEqual(no_prop_art['name'], art['depends_on']['name']) - - artifact_id = art['id'] - artifact = self._check_artifact_get( - '/withprops/%s?show_level=basic' % artifact_id) - self.assertEqual(artifact_id, artifact['id']) - self.assertIsNone(artifact['depends_on']) - - def test_get_artifact_none_show_level(self): - """Create an artifact (with two deployer-defined properties)""" - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'tags': ['gagaga', 'sesese'], - 'prop1': 'Arthur Dent', - 'prop2': 42} - art = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - expected_artifact = { - 'state': 'creating', - 'name': 'artifact-1', - 'version': '12.0.0', - 'tags': ['gagaga', 'sesese'], - 'visibility': 'private', - 'type_name': 'WithProps', - 'type_version': '1.0', - 'prop1': 'Arthur Dent', - 'prop2': 42 - } - for key, value in expected_artifact.items(): - self.assertEqual(art[key], value, key) - - artifact_id = art['id'] - artifact = self._check_artifact_get( - '/withprops/%s?show_level=none' % artifact_id) - self.assertEqual(artifact_id, artifact['id']) - self.assertIsNone(artifact['prop1']) - self.assertIsNone(artifact['prop2']) - - def test_get_artifact_invalid_show_level(self): - no_prop_art = self._create_artifact('noprop') - art = self._create_artifact( - 'withprops', - data={"name": "name", "version": "42", - "depends_on": no_prop_art['id']}) - self.assertEqual(no_prop_art['id'], art['depends_on']['id']) - self.assertEqual(no_prop_art['name'], art['depends_on']['name']) - - artifact_id = art['id'] - # 'yoba' is an invalid show level - self._check_artifact_get( - '/noprop/%s?show_level=yoba' % artifact_id, - status=http.BAD_REQUEST) - - def test_get_artifact_no_such_id(self): - msg = self._check_artifact_get( - '/noprop/%s' % str(uuid.uuid4()), status=http.NOT_FOUND) - self.assertIn('No artifact found with ID', msg) - - def test_get_artifact_present_id_wrong_type(self): - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'prop1': '12', - 'prop2': 12} - art1 = self._create_artifact('withprops', data=artifact_data) - art2 = self._create_artifact('noprop') - # a correct id and type_name but a bad type_version should result in 404 - self._check_artifact_get('/noprop/v0.5/%s' % str(art2['id']), - status=http.NOT_FOUND) - # try to access art2 by supplying art1.type and art2.id - self._check_artifact_get('/withprops/%s' % str(art2['id']), - status=http.NOT_FOUND) - self._check_artifact_get('/noprop/%s' % str(art1['id']), - 
status=http.NOT_FOUND) - - def test_delete_artifact(self): - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'prop1': '12', - 'prop2': 12} - art1 = self._create_artifact('withprops', data=artifact_data) - self._check_artifact_delete('/withprops/v1.0/%s' % art1['id']) - art1_deleted = self._check_artifact_get('/withprops/%s' % art1['id'], - status=http.NOT_FOUND) - self.assertIn('No artifact found with ID', art1_deleted) - - def test_delete_artifact_no_such_id(self): - self._check_artifact_delete('/noprop/v1/%s' % str(uuid.uuid4()), - status=http.NOT_FOUND) - - @unittest.skip("Test is unstable") - def test_delete_artifact_with_dependency(self): - # make sure that an artifact can't be deleted while it still has - # undeleted dependencies - art = self._create_artifact('withprops') - no_prop_art = self._create_artifact('noprop') - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], - data=[{'value': no_prop_art['id'], - 'op': 'replace', - 'path': '/depends_on'}, - {'value': [no_prop_art['id']], - 'op': 'add', - 'path': '/depends_on_list'}]) - self.assertEqual(no_prop_art['id'], art_updated['depends_on']['id']) - self.assertEqual(1, len(art_updated['depends_on_list'])) - # try to delete an artifact prior to its dependency - res = self._check_artifact_delete('/withprops/v1/%s' % art['id'], - status=http.BAD_REQUEST) - self.assertIn( - "Dependency property 'depends_on' has to be deleted first", res) - # delete a dependency - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], - data=[{'op': 'remove', 'path': '/depends_on'}]) - # try to delete prior to deleting artifact_list dependencies - res = self._check_artifact_delete('/withprops/v1/%s' % art['id'], - status=http.BAD_REQUEST) - self.assertIn( - "Dependency property 'depends_on_list' has to be deleted first", - res) - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], - data=[{'op': 'remove', 'path': '/depends_on_list'}]) - # with all dependencies removed, the artifact can now be deleted - self._check_artifact_delete('/withprops/v1/%s' % art['id']) - - def test_delete_artifact_with_blob(self): - # Upload some data to an artifact - art = self._create_artifact('withblob') - headers = self._headers({'Content-Type': 'application/octet-stream'}) - self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], - headers=headers, - data='ZZZZZ', status=http.OK) - self._check_artifact_delete('/withblob/v1/%s' % art['id']) - - def test_update_nonexistent_property_by_replace_op(self): - art = self._create_artifact('withprops', data={'name': 'some art', - 'version': '4.2'}) - data = [{'op': 'replace', 'value': 'some value', - 'path': '/nonexistent_property'}] - result = self._check_artifact_patch('/withprops/v1/%s' % - art['id'], - data=data, - status=http.BAD_REQUEST) - self.assertIn('400 Bad Request', result) - self.assertIn('Artifact has no property nonexistent_property', result) - - def test_update_nonexistent_property_by_remove_op(self): - art = self._create_artifact('withprops', data={'name': 'some art', - 'version': '4.2'}) - data = [{'op': 'remove', 'value': 'some value', - 'path': '/nonexistent_property'}] - result = self._check_artifact_patch('/withprops/v1/%s' % - art['id'], - data=data, - status=http.BAD_REQUEST) - self.assertIn('400 Bad Request', result) - self.assertIn('Artifact has no property nonexistent_property', result) - - def test_update_nonexistent_property_by_add_op(self): - art = self._create_artifact('withprops', data={'name': 'some art', - 'version': '4.2'}) - data = [{'op': 
'add', 'value': 'some value', - 'path': '/nonexistent_property'}] - result = self._check_artifact_patch('/withprops/v1/%s' % - art['id'], - data=data, - status=http.BAD_REQUEST) - self.assertIn('400 Bad Request', result) - self.assertIn('Artifact has no property nonexistent_property', result) - - def test_update_array_property_by_replace_op(self): - art = self._create_artifact('withprops', data={'name': 'some art', - 'version': '4.2'}) - self.assertEqual('some art', art['name']) - data = [{'op': 'replace', 'value': [1, 2, 3], 'path': '/prop_list'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' % - art['id'], - data=data) - self.assertEqual([1, 2, 3], art_updated['prop_list']) - # now try to change the second element of the list (index 1) - data_change_first = [{'op': 'replace', 'value': 42, - 'path': '/prop_list/1'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' % - art['id'], - data=data_change_first) - self.assertEqual([1, 42, 3], art_updated['prop_list']) - # replace last element - data_change_last = [{'op': 'replace', 'value': 24, - 'path': '/prop_list/-'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' % - art['id'], - data=data_change_last) - self.assertEqual([1, 42, 24], art_updated['prop_list']) - - def test_update_dict_property_by_replace_op(self): - art = self._create_artifact( - 'withprops', - data={'name': 'some art', - 'version': '4.2', - 'dict_prop': {'foo': "Fenchurch", 'bar_list': [42, 42]}}) - self.assertEqual({'foo': "Fenchurch", 'bar_list': [42, 42]}, - art['dict_prop']) - data = [{'op': 'replace', 'value': 24, - 'path': '/dict_prop/bar_list/0'}, - {'op': 'replace', 'value': 'cello lesson', - 'path': '/dict_prop/foo'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual({'foo': 'cello lesson', 'bar_list': [24, 42]}, - art_updated['dict_prop']) - - def test_update_empty_dict_property_by_replace_op(self): - art = self._create_artifact('withprops') - self.assertIsNone(art['dict_prop']) - data = [{'op': 'replace', 'value': "don't panic", - 'path': '/dict_prop/foo'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data, - status=http.BAD_REQUEST) - self.assertIn("The provided path 'dict_prop/foo' is invalid", - art_updated) - - def test_update_empty_dict_property_by_remove_op(self): - art = self._create_artifact('withprops') - self.assertIsNone(art['dict_prop']) - data = [{'op': 'remove', 'path': '/dict_prop/bar_list'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data, - status=http.BAD_REQUEST) - self.assertIn("The provided path 'dict_prop/bar_list' is invalid", - art_updated) - - def test_update_dict_property_by_remove_op(self): - art = self._create_artifact( - 'withprops', - data={'name': 'some art', 'version': '4.2', - 'dict_prop': {'foo': "Fenchurch", 'bar_list': [42, 42]}}) - self.assertEqual({'foo': 'Fenchurch', 'bar_list': [42, 42]}, - art['dict_prop']) - data = [{'op': 'remove', 'path': '/dict_prop/foo'}, - {'op': 'remove', 'path': '/dict_prop/bar_list/1'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual({'bar_list': [42]}, art_updated['dict_prop']) - # now delete the whole dict - data = [{'op': 'remove', 'path': '/dict_prop'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertIsNone(art_updated['dict_prop']) - - @unittest.skip("Skipping due to a known bug") - def 
test_update_dict_property_change_values(self): - art = self._create_artifact( - 'withprops', data={'name': 'some art', 'version': '4.2', - 'dict_prop_strval': - {'foo': 'Fenchurch', 'bar': 'no value'}}) - self.assertEqual({'foo': 'Fenchurch', 'bar': 'no value'}, - art['dict_prop_strval']) - new_data = [{'op': 'replace', 'path': '/dict_prop_strval', - 'value': {'new-foo': 'Arthur Dent'}}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=new_data) - self.assertEqual({'new-foo': 'Arthur Dent'}, - art_updated['dict_prop_strval']) - - def test_update_array_property_by_remove_op(self): - art = self._create_artifact( - 'withprops', data={'name': 'some art', - 'version': '4.2', - 'prop_list': [1, 2, 3]}) - self.assertEqual([1, 2, 3], art['prop_list']) - data = [{'op': 'remove', 'path': '/prop_list/0'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual([2, 3], art_updated['prop_list']) - # remove last element - data = [{'op': 'remove', 'path': '/prop_list/-'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual([2], art_updated['prop_list']) - # now delete the whole array - data = [{'op': 'remove', 'path': '/prop_list'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertIsNone(art_updated['prop_list']) - - def test_update_array_property_by_add_op(self): - art = self._create_artifact( - 'withprops', data={'name': 'some art', - 'version': '4.2'}) - self.assertIsNone(art['prop_list']) - data = [{'op': 'add', 'path': '/prop_list', 'value': [2, 12, 0, 6]}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], data=data) - self.assertEqual([2, 12, 0, 6], art_updated['prop_list']) - data = [{'op': 'add', 'path': '/prop_list/2', 'value': 85}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], data=data) - self.assertEqual([2, 12, 85, 0, 6], art_updated['prop_list']) - # add where path='/array/-' means append to the end - data = [{'op': 'add', 'path': '/prop_list/-', 'value': 7}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], data=data) - self.assertEqual([2, 12, 85, 0, 6, 7], art_updated['prop_list']) - # an attempt to add an element to nonexistent position should - # result in 400 - self.assertEqual(6, len(art_updated['prop_list'])) - bad_index_data = [{'op': 'add', 'path': '/prop_list/11', - 'value': 42}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=bad_index_data, - status=http.BAD_REQUEST) - self.assertIn("The provided path 'prop_list/11' is invalid", - art_updated) - - def test_update_dict_property_by_add_op(self): - art = self._create_artifact("withprops") - self.assertIsNone(art['dict_prop']) - data = [{'op': 'add', 'path': '/dict_prop/foo', 'value': "some value"}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual({"foo": "some value"}, art_updated['dict_prop']) - - def test_update_empty_array_property_by_add_op(self): - """Test jsonpatch add. 
- - According to RFC 6902: - * if the array is empty, '/array/0' is a valid path - """ - create_data = {'name': 'new artifact', - 'version': '4.2'} - art = self._create_artifact('withprops', data=create_data) - self.assertIsNone(art['prop_list']) - data = [{'op': 'add', 'path': '/prop_list/0', 'value': 3}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual([3], art_updated['prop_list']) - - def test_update_tuple_property_by_index(self): - art = self._create_artifact( - 'withprops', data={'name': 'some art', - 'version': '4.2', - 'tuple_prop': [1, False]}) - self.assertEqual([1, False], art['tuple_prop']) - data = [{'op': 'replace', 'value': True, - 'path': '/tuple_prop/1'}, - {'op': 'replace', 'value': 2, - 'path': '/tuple_prop/0'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertEqual([2, True], art_updated['tuple_prop']) - - def test_update_artifact(self): - art = self._create_artifact('noprop') - self.assertEqual('artifact-1', art['name']) - art_updated = self._check_artifact_patch( - '/noprop/v1/%s' % art['id'], - data=[{'op': 'replace', 'value': '0.0.9', 'path': '/version'}]) - self.assertEqual('0.0.9', art_updated['version']) - - def test_update_artifact_properties(self): - art = self._create_artifact('withprops') - for prop in ['prop1', 'prop2']: - self.assertIsNone(art[prop]) - data = [{'op': 'replace', 'value': 'some value', - 'path': '/prop1'}] - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], data=data) - self.assertEqual('some value', art_updated['prop1']) - - def test_update_remove_non_existent_artifact_properties(self): - art = self._create_artifact('withprops') - for prop in ['prop1', 'prop2']: - self.assertIsNone(art[prop]) - data = [{'op': 'remove', 'value': 'some value', - 'path': '/non-existent-path/and-another'}] - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], data=data, - status=http.BAD_REQUEST) - self.assertIn('Artifact has no property', art_updated) - - def test_update_replace_non_existent_artifact_properties(self): - art = self._create_artifact('withprops') - for prop in ['prop1', 'prop2']: - self.assertIsNone(art[prop]) - data = [{'op': 'replace', 'value': 'some value', - 'path': '/non-existent-path/and-another'}] - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], data=data, - status=http.BAD_REQUEST) - self.assertIn('Artifact has no property', art_updated) - - def test_update_artifact_remove_property(self): - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'tags': ['gagaga', 'sesese'], - 'prop1': 'Arthur Dent', - 'prop2': 42} - art = self._create_artifact('withprops', data=artifact_data) - data = [{'op': 'remove', 'path': '/prop1'}] - art_updated = self._check_artifact_patch('/withprops/v1/%s' - % art['id'], - data=data) - self.assertIsNone(art_updated['prop1']) - self.assertEqual(42, art_updated['prop2']) - - def test_update_wrong_property_type(self): - art = self._create_artifact('withprops') - for prop in ['prop1', 'prop2']: - self.assertIsNone(art[prop]) - data = [{'op': 'replace', 'value': 123, 'path': '/prop1'}] - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], data=data, status=http.BAD_REQUEST) - self.assertIn("Property 'prop1' may not have value '123'", art_updated) - - def test_update_multiple_properties(self): - with_prop_art = self._create_artifact('withprops') - data = [{'op': 'replace', - 'path': '/prop1', -
'value': 'some value'}, - {'op': 'replace', - 'path': '/prop2', - 'value': 42}] - updated = self._check_artifact_patch( - '/withprops/v1/%s' % with_prop_art['id'], data=data) - self.assertEqual('some value', updated['prop1']) - self.assertEqual(42, updated['prop2']) - - def test_create_artifact_with_dependency(self): - no_prop_art = self._create_artifact('noprop') - art = self._create_artifact( - 'withprops', - data={"name": "name", "version": "42", - "depends_on": no_prop_art['id']}) - self.assertEqual(no_prop_art['id'], art['depends_on']['id']) - self.assertEqual(no_prop_art['name'], art['depends_on']['name']) - - def test_create_artifact_dependency_list(self): - no_prop_art1 = self._create_artifact('noprop') - no_prop_art2 = self._create_artifact('noprop') - art = self._create_artifact( - 'withprops', - data={"name": "name", "version": "42", - "depends_on_list": [no_prop_art1['id'], no_prop_art2['id']]}) - self.assertEqual(2, len(art['depends_on_list'])) - self.assertEqual([no_prop_art1['id'], no_prop_art2['id']], - [x['id'] for x in art['depends_on_list']]) - - def test_create_dependency_list_same_id(self): - no_prop_art = self._create_artifact('noprop') - res = self._create_artifact( - 'withprops', - data={"name": "name", "version": "42", - "depends_on_list": [no_prop_art['id'], - no_prop_art['id']]}, - status=http.BAD_REQUEST) - self.assertIn("Items have to be unique", res) - - def test_create_artifact_bad_dependency_format(self): - """Invalid dependency creation. - - Dependencies should be passed: - * as a list of ids if param is an ArtifactReferenceList - * as an id if param is an ArtifactReference - """ - no_prop_art = self._create_artifact('noprop') - art = self._check_artifact_post( - '/withprops/v1/drafts', - {"name": "name", "version": "42", - "depends_on": [no_prop_art['id']]}, status=http.BAD_REQUEST) - self.assertIn('Not a valid value type', art) - art = self._check_artifact_post( - '/withprops/v1.0/drafts', - {"name": "name", "version": "42", - "depends_on_list": no_prop_art['id']}, status=http.BAD_REQUEST) - self.assertIn('object is not iterable', art) - - def test_update_dependency(self): - no_prop_art = self._create_artifact('noprop') - no_prop_art1 = self._create_artifact('noprop') - with_prop_art = self._create_artifact('withprops') - data = [{'op': 'replace', - 'path': '/depends_on', - 'value': no_prop_art['id']}] - updated = self._check_artifact_patch( - '/withprops/v1/%s' % with_prop_art['id'], data=data) - self.assertEqual(no_prop_art['id'], updated['depends_on']['id']) - self.assertEqual(no_prop_art['name'], updated['depends_on']['name']) - data = [{'op': 'replace', - 'path': '/depends_on', - 'value': no_prop_art1['id']}] - # update again and make sure it changes - updated = self._check_artifact_patch( - '/withprops/v1/%s' % with_prop_art['id'], data=data) - self.assertEqual(no_prop_art1['id'], updated['depends_on']['id']) - self.assertEqual(no_prop_art1['name'], updated['depends_on']['name']) - - def test_update_dependency_circular_reference(self): - with_prop_art = self._create_artifact('withprops') - data = [{'op': 'replace', - 'path': '/depends_on', - 'value': [with_prop_art['id']]}] - not_updated = self._check_artifact_patch( - '/withprops/v1/%s' % with_prop_art['id'], data=data, - status=http.BAD_REQUEST) - self.assertIn('Artifact with a circular dependency can not be created', - not_updated) - - def test_publish_artifact(self): - art = self._create_artifact('withprops') - # now create dependency - no_prop_art = self._create_artifact('noprop') -
art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], - data=[{'value': no_prop_art['id'], - 'op': 'replace', - 'path': '/depends_on'}]) - self.assertNotEqual(0, len(art_updated['depends_on'])) - # artifact can't be published if any dependency is in non-active state - res = self._check_artifact_post( - '/withprops/v1/%s/publish' % art['id'], {}, - status=http.BAD_REQUEST) - self.assertIn("Not all dependencies are in 'active' state", res) - # after you publish the dependency -> artifact can be published - dep_published = self._check_artifact_post( - '/noprop/v1/%s/publish' % no_prop_art['id'], {}, status=http.OK) - self.assertEqual('active', dep_published['state']) - art_published = self._check_artifact_post( - '/withprops/v1.0/%s/publish' % art['id'], {}, status=http.OK) - self.assertEqual('active', art_published['state']) - - def test_no_mutable_change_in_published_state(self): - art = self._create_artifact('withprops') - no_prop_art = self._create_artifact('noprop') - no_prop_other = self._create_artifact('noprop') - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], - data=[{'value': no_prop_art['id'], - 'op': 'replace', - 'path': '/depends_on'}]) - self.assertEqual(no_prop_art['id'], art_updated['depends_on']['id']) - # now change dependency to some other artifact - art_updated = self._check_artifact_patch( - '/withprops/v1/%s' % art['id'], - data=[{'value': no_prop_other['id'], - 'op': 'replace', - 'path': '/depends_on'}]) - self.assertEqual(no_prop_other['id'], art_updated['depends_on']['id']) - # publish dependency - dep_published = self._check_artifact_post( - '/noprop/v1/%s/publish' % no_prop_other['id'], {}, status=http.OK) - self.assertEqual('active', dep_published['state']) - # publish artifact - art_published = self._check_artifact_post( - '/withprops/v1.0/%s/publish' % art['id'], {}, status=http.OK) - self.assertEqual('active', art_published['state']) - # try to change dependency, should fail as already published - res = self._check_artifact_patch( - '/withprops/v1/%s' % art_published['id'], - data=[{'op': 'remove', 'path': '/depends_on'}], - status=http.BAD_REQUEST) - self.assertIn('Attempt to set value of immutable property', res) - - def test_create_artifact_empty_body(self): - self._check_artifact_post('/noprop/v1.0/drafts', {}, http.BAD_REQUEST) - - def test_create_artifact_insufficient_arguments(self): - self._check_artifact_post('/noprop/v1.0/drafts', - {'name': 'some name, no version'}, - status=http.BAD_REQUEST) - - def test_create_artifact_no_such_version(self): - """Creation impossible without specifying a correct version. - - An attempt to create an artifact out of an existing plugin but with - a wrong version should result in - 400 BadRequest 'No such plugin has been loaded' - """ - # make sure there is no such artifact noprop - self._check_artifact_get('/noprop/v0.0.9', http.BAD_REQUEST) - artifact_data = {'name': 'artifact-1', - 'version': '12'} - msg = self._check_artifact_post('/noprop/v0.0.9/drafts', - artifact_data, - status=http.BAD_REQUEST) - self.assertIn("No plugin for 'noprop v 0.0.9' has been loaded", - msg) - - def test_create_artifact_no_type_version_specified(self): - """Creation impossible without specifying a version.
- - It should not be possible to create an artifact out of an existing - plugin without specifying any version - """ - artifact_data = {'name': 'artifact-1', - 'version': '12'} - self._check_artifact_post('/noprop/drafts', artifact_data, - http.NOT_FOUND) - - def test_create_artifact_no_properties(self): - """Create an artifact with minimum parameters""" - artifact_data = {'name': 'artifact-1', - 'version': '12'} - artifact = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - # verify that all fields have the values expected - expected_artifact = { - 'state': 'creating', - 'name': 'artifact-1', - 'version': '12.0.0', - 'tags': [], - 'visibility': 'private', - 'type_name': 'WithProps', - 'type_version': '1.0', - 'prop1': None, - 'prop2': None - } - for key, value in expected_artifact.items(): - self.assertEqual(artifact[key], value, key) - - def test_create_artifact_with_properties(self): - """Create an artifact (with two deployer-defined properties)""" - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'tags': ['gagaga', 'sesese'], - 'prop1': 'Arthur Dent', - 'prop2': 42} - artifact = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - expected_artifact = { - 'state': 'creating', - 'name': 'artifact-1', - 'version': '12.0.0', - 'tags': ['gagaga', 'sesese'], - 'visibility': 'private', - 'type_name': 'WithProps', - 'type_version': '1.0', - 'prop1': 'Arthur Dent', - 'prop2': 42 - } - for key, value in expected_artifact.items(): - self.assertEqual(artifact[key], value, key) - - def test_create_artifact_not_all_properties(self): - """Create artifact with minimal properties. - - Checks that it is possible to create an artifact by passing all - required properties but omitting some that are not required - """ - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'visibility': 'private', - 'tags': ['gagaga', 'sesese'], - 'prop1': 'i am ok'} - artifact = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - expected_artifact = { - 'state': 'creating', - 'name': 'artifact-1', - 'version': '12.0.0', - 'tags': ['gagaga', 'sesese'], - 'visibility': 'private', - 'type_name': 'WithProps', - 'type_version': '1.0', - 'prop1': 'i am ok', - 'prop2': None} - for key, value in expected_artifact.items(): - self.assertEqual(artifact[key], value, key) - # now check creation with no properties specified - for prop in ['prop1', 'prop2']: - artifact_data.pop(prop, '') - artifact = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - for prop in ['prop1', 'prop2']: - self.assertIsNone(artifact[prop]) - - def test_create_artifact_invalid_properties(self): - """Any attempt to pass invalid properties should result in 400""" - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'prop1': 1} - res = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data, - status=http.BAD_REQUEST) - self.assertIn("Property 'prop1' may not have value '1'", res) - artifact_data.pop('prop1') - artifact_data['nosuchprop'] = "Random" - res = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data, - status=http.BAD_REQUEST) - self.assertIn("Artifact has no property nosuchprop", res) - - def test_create_public_artifact(self): - """Create an artifact with visibility set to public""" - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'visibility': 'public'} - artifact = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - # verify that all fields have the values expected - expected_artifact = { - 'state':
'creating', - 'name': 'artifact-1', - 'version': '12.0.0', - 'tags': [], - 'visibility': 'public', - 'type_name': 'WithProps', - 'type_version': '1.0', - 'prop1': None, - 'prop2': None - } - for key, value in expected_artifact.items(): - self.assertEqual(artifact[key], value, key) - - def test_upload_file(self): - # Upload some data to an artifact - art = self._create_artifact('withblob') - headers = self._headers({'Content-Type': 'application/octet-stream'}) - self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], - headers=headers, - data='ZZZZZ', status=http.OK) - - def test_upload_file_with_invalid_content_type(self): - art = self._create_artifact('withblob') - data = {'data': 'jjjjjj'} - res = self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], - data=data, status=http.BAD_REQUEST) - self.assertIn('Invalid Content-Type for work with blob1', res) - - res = self._check_artifact_post('/withblob/v1/%s/blob_list' - % art['id'], - data=data, status=http.BAD_REQUEST) - self.assertIn('Invalid Content-Type for work with blob_list', res) - - def test_upload_list_files(self): - art = self._create_artifact('withblob') - headers = self._headers({'Content-Type': 'application/octet-stream'}) - self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'], - headers=headers, - data='ZZZZZ', status=http.OK) - self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'], - headers=headers, - data='YYYYY', status=http.OK) - - def test_download_file(self): - # Download some data from an artifact - art = self._create_artifact('withblob') - artifact_id = art['id'] - headers = self._headers({'Content-Type': 'application/octet-stream'}) - self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], - headers=headers, - data='ZZZZZ', status=http.OK) - - art = self._check_artifact_get('/withblob/%s' % artifact_id) - self.assertEqual(artifact_id, art['id']) - self.assertIn('download_link', art['blob1']) - - data = self._check_artifact_get( - '/withblob/%s/blob1/download' % art['id']) - self.assertEqual('ZZZZZ', data) - - def test_file_w_unknown_size(self): - # Upload and download data provided by an iterator, thus without - # knowing the length in advance - art = self._create_artifact('withblob') - artifact_id = art['id'] - - def iterate_string(val): - for char in val: - yield char - - headers = self._headers({'Content-Type': 'application/octet-stream'}) - self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], - headers=headers, - data=iterate_string('ZZZZZ'), status=http.OK) - - art = self._check_artifact_get('/withblob/%s' % artifact_id) - self.assertEqual(artifact_id, art['id']) - self.assertIn('download_link', art['blob1']) - - data = self._check_artifact_get( - '/withblob/%s/blob1/download' % art['id']) - self.assertEqual('ZZZZZ', data) - - def test_limit(self): - artifact_data = {'name': 'artifact-1', - 'version': '12'} - self._check_artifact_post('/withprops/v1/drafts', - artifact_data) - artifact_data = {'name': 'artifact-1', - 'version': '13'} - self._check_artifact_post('/withprops/v1/drafts', - artifact_data) - result = self._check_artifact_get('/withprops/v1/drafts') - self.assertEqual(2, len(result["artifacts"])) - result = self._check_artifact_get('/withprops/v1/drafts?limit=1') - self.assertEqual(1, len(result["artifacts"])) - - def _check_sorting_order(self, expected, actual): - for e, a in zip(expected, actual): - self.assertEqual(e['name'], a['name']) - self.assertEqual(e['version'], a['version']) - self.assertEqual(e['prop1'], a['prop1']) - - def 
test_sort(self): - artifact_data = {'name': 'artifact-1', - 'version': '12', - 'prop1': 'lala'} - art1 = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - artifact_data = {'name': 'artifact-2', - 'version': '13', - 'prop1': 'lala'} - art2 = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - artifact_data = {'name': 'artifact-3', - 'version': '13', - 'prop1': 'tutu'} - art3 = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - artifact_data = {'name': 'artifact-4', - 'version': '13', - 'prop1': 'hyhy'} - art4 = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - artifact_data = {'name': 'artifact-5', - 'version': '13', - 'prop1': 'bebe'} - art5 = self._check_artifact_post('/withprops/v1.0/drafts', - artifact_data) - - result = self._check_artifact_get( - '/withprops/v1.0/drafts?sort=name')["artifacts"] - self.assertEqual(5, len(result)) - - # default direction is 'desc' - expected = [art5, art4, art3, art2, art1] - self._check_sorting_order(expected, result) - - result = self._check_artifact_get( - '/withprops/v1.0/drafts?sort=name:asc')["artifacts"] - self.assertEqual(5, len(result)) - - expected = [art1, art2, art3, art4, art5] - self._check_sorting_order(expected, result) - - result = self._check_artifact_get( - '/withprops/v1.0/drafts?sort=version:asc,prop1')["artifacts"] - self.assertEqual(5, len(result)) - - expected = [art1, art3, art2, art4, art5] - self._check_sorting_order(expected, result) - - def test_update_property(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - # update single integer property via PUT - upd = self._check_artifact_put('/withprops/v1.0/%s/prop2' % art['id'], - data={'data': 15}) - self.assertEqual(15, upd['prop2']) - # create list property via PUT - upd = self._check_artifact_put( - '/withprops/v1.0/%s/tuple_prop' % art['id'], - data={'data': [42, True]}) - self.assertEqual([42, True], upd['tuple_prop']) - # change list property via PUT - upd = self._check_artifact_put( - '/withprops/v1.0/%s/tuple_prop/0' % art['id'], data={'data': 24}) - self.assertEqual([24, True], upd['tuple_prop']) - # append to list property via POST - upd = self._check_artifact_post( - '/withprops/v1.0/%s/prop_list' % art['id'], data={'data': [11]}, - status=http.OK) - self.assertEqual([11], upd['prop_list']) - # append to list property via POST - upd = self._check_artifact_post( - '/withprops/v1.0/%s/prop_list/-' % art['id'], - status=http.OK, data={'data': 10}) - self.assertEqual([11, 10], upd['prop_list']) - - def test_bad_update_property(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - # try to update nonexistent property - upd = self._check_artifact_put( - '/withprops/v1.0/%s/nosuchprop' % art['id'], - data={'data': 'wont be set'}, status=http.BAD_REQUEST) - self.assertIn('Artifact has no property nosuchprop', upd) - # try to pass wrong property value - upd = self._check_artifact_put( - '/withprops/v1.0/%s/tuple_prop' % art['id'], - data={'data': ['should be an int', False]}, - status=http.BAD_REQUEST) - self.assertIn("Property 'tuple_prop[0]' may not have value", upd) - # try to pass bad body (not a valid json) - upd = self._check_artifact_put( - '/withprops/v1.0/%s/tuple_prop' % art['id'], data="not a json", - status=http.BAD_REQUEST) - self.assertIn("Invalid json body", upd) - # try to pass json body invalid under schema - upd = self._check_artifact_put( - 
'/withprops/v1.0/%s/tuple_prop' % art['id'], - data={"bad": "schema"}, status=http.BAD_REQUEST) - self.assertIn("Invalid json body", upd) - - def test_update_different_depth_levels(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - upd = self._check_artifact_post( - '/withprops/v1.0/%s/dict_prop' % art['id'], - data={'data': {'foo': 'some value'}}, status=http.OK) - self.assertEqual({'foo': 'some value'}, upd['dict_prop']) - upd = self._check_artifact_post( - '/withprops/v1.0/%s/dict_prop/bar_list' % art['id'], - data={'data': [5]}, status=http.OK) - self.assertEqual({'foo': 'some value', 'bar_list': [5]}, - upd['dict_prop']) - upd = self._check_artifact_post( - '/withprops/v1.0/%s/dict_prop/bar_list/0' % art['id'], - data={'data': 15}, status=http.OK) - self.assertEqual({'foo': 'some value', 'bar_list': [5, 15]}, - upd['dict_prop']) - # try to update dict_prop via a nonexistent path - upd = self._check_artifact_post( - '/withprops/v1.0/%s/dict_prop/bar_list/nosuchkey' % art['id'], - data={'data': 15}, status=http.BAD_REQUEST) - - def test_artifact_inaccessible_by_different_user(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - self._set_user('user2') - self._check_artifact_get('/withprops/%s' % art['id'], http.NOT_FOUND) - - def test_artifact_accessible_by_admin(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - self._set_user('admin') - self._check_artifact_get('/withprops/%s' % art['id'], http.OK) - - def test_public_artifact_accessible_by_different_user(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - self._check_artifact_patch( - '/withprops/v1.0/%s' % art['id'], - data=[{'op': 'replace', 'value': 'public', 'path': '/visibility'}]) - self._set_user('user2') - self._check_artifact_get('/withprops/%s' % art['id'], http.OK) - - def test_public_artifact_not_editable_by_different_user(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - self._check_artifact_patch( - '/withprops/v1.0/%s' % art['id'], - data=[{'op': 'replace', 'value': 'public', 'path': '/visibility'}]) - self._set_user('user2') - self._check_artifact_patch( - '/withprops/v1.0/%s' % art['id'], - data=[{'op': 'replace', 'value': 'private', - 'path': '/visibility'}], status=http.FORBIDDEN) - - def test_public_artifact_editable_by_admin(self): - data = {'name': 'an artifact', - 'version': '42'} - art = self._create_artifact('withprops', data=data) - self._check_artifact_patch( - '/withprops/v1.0/%s' % art['id'], - data=[{'op': 'replace', 'value': 'public', 'path': '/visibility'}]) - self._set_user('admin') - self._check_artifact_patch( - '/withprops/v1.0/%s' % art['id'], - data=[{'op': 'replace', 'value': 'private', - 'path': '/visibility'}], status=http.OK) - - def test_list_artifact_types(self): - actual = { - u'artifact_types': [ - {u'displayed_name': u'NoProp', - u'type_name': u'NoProp', - u'versions': - [{u'id': u'v0.5', - u'link': u'http://127.0.0.1:%d/v0.1/' - u'artifacts/noprop/v0.5' - % self.api_port}, - {u'id': u'v1.0', - u'link': u'http://127.0.0.1:%d/v0.1/' - u'artifacts/noprop/v1.0' - % self.api_port}]}, - {u'displayed_name': u'WithBlob', - u'type_name': u'WithBlob', - u'versions': - [{u'id': u'v1.0', - u'link': - u'http://127.0.0.1:%d/v0.1/artifacts/withblob/v1.0' - % self.api_port}]}, -
{u'displayed_name': u'WithProps', - u'type_name': u'WithProps', - u'versions': - [{u'id': u'v1.0', - u'link': - u'http://127.0.0.1:%d/v0.1/artifacts/withprops/v1.0' - % self.api_port}]}]} - - response = self._check_artifact_get("", status=http.OK) - response[u'artifact_types'].sort(key=lambda x: x[u'type_name']) - for artifact_type in response[u'artifact_types']: - artifact_type[u'versions'].sort(key=lambda x: x[u'id']) - - self.assertEqual(actual, response) - - def test_invalid_content_type(self): - data = {'name': 'name1', 'version': '2.2'} - self._check_artifact_post('/withprops/v1.0/drafts', - data=data, - status=http.BAD_REQUEST, - headers={'Content-Type': 'lalala'}) - - def test_filter_by_non_dict_props(self): - data = {'name': 'art1', - 'version': '4.2', - 'prop2': 12 - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art2', - 'version': '4.2', - 'prop2': 10 - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art3', - 'version': '4.2', - 'prop2': 10 - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art4', - 'version': '4.3', - 'prop2': 33 - } - self._create_artifact('withprops', data=data) - - result = self._check_artifact_get( - '/withprops/v1.0/drafts?name=art2')['artifacts'] - self.assertEqual(1, len(result)) - - result = self._check_artifact_get( - '/withprops/v1.0/drafts?prop2=10')['artifacts'] - self.assertEqual(2, len(result)) - - def test_filter_by_dict_props(self): - data = {'name': 'art1', - 'version': '4.2', - 'dict_prop': - {'foo': 'Moscow', - 'bar_list': [42, 44]} - } - self._create_artifact('withprops', data=data) - data = {'name': 'art2', - 'version': '4.2', - 'dict_prop': - {'foo': 'Saratov', - 'bar_list': [42, 42]} - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art3', - 'version': '4.2', - 'dict_prop': - {'foo': 'Saratov', - 'bar_list': [42, 44]} - } - self._create_artifact('withprops', data=data) - - url = '/withprops/v1.0/drafts?dict_prop.foo=Saratov' - result = self._check_artifact_get(url=url) - - self.assertEqual(2, len(result)) - - url = '/withprops/v1.0/drafts?dict_prop.bar_list=44' - result = self._check_artifact_get(url=url) - - self.assertEqual(2, len(result)) - - def test_transformation_versions(self): - data = {'name': 'art1', - 'version': '1'} - art1 = self._create_artifact('noprop', data=data) - - data = {'name': 'art2', - 'version': '1.0'} - art2 = self._create_artifact('noprop', data=data) - - v1 = art1.get("version") - v2 = art2.get("version") - - self.assertEqual('1.0.0', v1) - self.assertEqual('1.0.0', v2) - - def test_filter_by_ge_version(self): - data = {'name': 'art1', - 'version': '4.0.0'} - art1 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.0.1'} - art2 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-1'} - art3 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-2'} - art4 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0'} - art5 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '5.0.0'} - art6 = self._create_artifact('noprop', data=data) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:4.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = 
'/noprop/v1.0/drafts?name=art1&version=ge:4.0.1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art2, art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:5.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [art6] - self.assertEqual(actual, result) - - def test_filter_by_gt_version(self): - data = {'name': 'art1', - 'version': '4.0.0'} - self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.0.1'} - art2 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-1'} - art3 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-2'} - art4 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0'} - art5 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '5.0.0'} - art6 = self._create_artifact('noprop', data=data) - - url = '/noprop/v1.0/drafts?name=art1&version=gt:4.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art2, art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=gt:4.0.1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=gt:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=gt:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=gt:4.2.0' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [art6] - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=gt:5.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [] - self.assertEqual(actual, result) - - def test_filter_by_le_version(self): - data = {'name': 'art1', - 'version': '4.0.0'} - art1 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.0.1'} - art2 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-1'} - art3 = 
self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-2'} - art4 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0'} - art5 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '5.0.0'} - art6 = self._create_artifact('noprop', data=data) - - url = '/noprop/v1.0/drafts?name=art1&version=le:4.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [art1] - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=le:4.0.1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4, art5] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=le:5.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - def test_filter_by_lt_version(self): - data = {'name': 'art1', - 'version': '4.0.0'} - art1 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.0.1'} - art2 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-1'} - art3 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-2'} - art4 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0'} - art5 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '5.0.0'} - self._create_artifact('noprop', data=data) - - url = '/noprop/v1.0/drafts?name=art1&version=lt:4.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [] - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=lt:4.0.1' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [art1] - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=lt:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=lt:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=lt:4.2.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4] - 
actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=lt:5.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4, art5] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - def test_filter_by_ne_version(self): - data = {'name': 'art1', - 'version': '4.0.0'} - art1 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.0.1'} - art2 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-1'} - art3 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-2'} - art4 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0'} - art5 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '5.0.0'} - art6 = self._create_artifact('noprop', data=data) - - url = '/noprop/v1.0/drafts?name=art1&version=ne:4.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art2, art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ne:4.0.1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art3, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ne:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art4, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ne:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art5, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ne:4.2.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4, art6] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ne:5.0.0' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2, art3, art4, art5] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - def test_filter_by_pre_release_version(self): - data = {'name': 'art1', - 'version': '4.2.0-1'} - art1 = self._create_artifact('noprop', data=data) - - data = {'name': 'art1', - 'version': '4.2.0-2'} - art2 = self._create_artifact('noprop', data=data) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [art2] - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-2' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) - - url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - result.sort(key=lambda x: x['id']) - - actual = [art1, art2] - actual.sort(key=lambda x: x['id']) - self.assertEqual(actual, result) 
- - url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-1' - result = self._check_artifact_get(url=url)['artifacts'] - actual = [art1] - self.assertEqual(actual, result) - - def test_filter_by_range_props(self): - data = {'name': 'art1', - 'version': '4.2', - 'prop2': 10 - } - self._create_artifact('withprops', data=data) - data = {'name': 'art2', - 'version': '4.2', - 'prop2': 100 - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art3', - 'version': '4.2', - 'prop2': 1000 - } - self._create_artifact('withprops', data=data) - - url = '/withprops/v1.0/drafts?prop2=gt:99&prop2=lt:101' - result = self._check_artifact_get(url=url)['artifacts'] - - self.assertEqual(1, len(result)) - - url = '/withprops/v1.0/drafts?prop2=gt:99&prop2=lt:2000' - result = self._check_artifact_get(url=url)['artifacts'] - - self.assertEqual(2, len(result)) - - def test_filter_by_tags(self): - data = {'name': 'art1', - 'version': '4.2', - 'tags': ['hyhyhy', 'tytyty'] - } - self._create_artifact('withprops', data=data) - data = {'name': 'art2', - 'version': '4.2', - 'tags': ['hyhyhy', 'cicici'] - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art3', - 'version': '4.2', - 'tags': ['ededed', 'bobobo'] - } - self._create_artifact('withprops', data=data) - - url = '/withprops/v1.0/drafts?tags=hyhyhy' - result = self._check_artifact_get(url=url)['artifacts'] - - self.assertEqual(2, len(result)) - - url = '/withprops/v1.0/drafts?tags=cicici&tags=hyhyhy' - result = self._check_artifact_get(url=url)['artifacts'] - - self.assertEqual(1, len(result)) - - def test_filter_by_latest_version(self): - data = {'name': 'art1', - 'version': '1.2', - 'tags': ['hyhyhy', 'tytyty'] - } - self._create_artifact('withprops', data=data) - data = {'name': 'latest_artifact', - 'version': '3.2', - 'tags': ['hyhyhy', 'cicici'] - } - self._create_artifact('withprops', data=data) - - data = {'name': 'latest_artifact', - 'version': '3.2', - 'tags': ['ededed', 'bobobo'] - } - self._create_artifact('withprops', data=data) - - url = '/withprops/v1.0/drafts?version=latest&name=latest_artifact' - result = self._check_artifact_get(url=url) - - self.assertEqual(2, len(result)) - - url = '/withprops/v1.0/drafts?version=latest' - self._check_artifact_get(url=url, status=http.BAD_REQUEST) - - def test_filter_by_version_only(self): - data = {'name': 'art1', - 'version': '3.2' - } - self._create_artifact('withprops', data=data) - data = {'name': 'art2', - 'version': '4.2' - } - self._create_artifact('withprops', data=data) - - data = {'name': 'art3', - 'version': '4.3' - } - self._create_artifact('withprops', data=data) - - url = '/withprops/v1.0/drafts?version=gt:4.0&version=lt:10.1' - result = self._check_artifact_get(url=url)['artifacts'] - - self.assertEqual(2, len(result)) - - url = '/withprops/v1.0/drafts?version=gt:4.0&version=ne:4.3' - result = self._check_artifact_get(url=url)['artifacts'] - - self.assertEqual(1, len(result)) - - def test_operation_patch_with_blob(self): - data = {'name': 'art1', - 'version': '3.2' - } - art = self._create_artifact('withblob', data=data) - - msg = 'Invalid request PATCH for work with blob' - - result = self._check_artifact_patch( - '/withblob/v1.0/%s' % art['id'], - status=http.BAD_REQUEST, - data=[{'op': 'replace', - 'value': 'public', - 'path': '/blob1'}]) - self.assertIn(msg, result) - - result = self._check_artifact_patch( - '/withblob/v1.0/%s' % art['id'], - status=http.BAD_REQUEST, - data=[{'op': 'remove', - 'value': 'public', - 'path': '/blob1'}]) - 
self.assertIn(msg, result) - - result = self._check_artifact_patch( - '/withblob/v1.0/%s' % art['id'], - status=http.BAD_REQUEST, - data=[{'op': 'add', - 'value': 'public', - 'path': '/blob1'}]) - self.assertIn(msg, result) - - def test_filter_by_bad_version(self): - bad_versions = ['kkk', '1.k', 'h.0', '1.3.hf', 's.9.2s2'] - response_string = ('The format of the version %s is not valid. ' - 'Use semver notation') - for bad_version in bad_versions: - url = '/withprops/v1.0/drafts?version=gt:%s' % bad_version - result = self._check_artifact_get(url=url, status=http.BAD_REQUEST) - self.assertIn(response_string % bad_version, result) - - def test_circular_dependency(self): - data = {'name': 'artifact', - 'version': '12'} - art = self._create_artifact('withprops', data=data) - - upd = self._check_artifact_post( - '/withprops/v1.0/%s/depends_on' % art['id'], - data={'data': art['id']}, status=http.BAD_REQUEST) - self.assertIn( - 'Artifact with a circular dependency can not be created', upd) diff --git a/glance/tests/unit/common/test_semver.py b/glance/tests/unit/common/test_semver.py deleted file mode 100644 index 4842f7eee6..0000000000 --- a/glance/tests/unit/common/test_semver.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - from glance.common import exception -from glance.common import semver_db -from glance.tests import utils as test_utils - - -class SemVerTestCase(test_utils.BaseTestCase): - def test_long_conversion(self): - initial = '1.2.3-beta+07.17.2014' - v = semver_db.parse(initial) - l, prerelease, build = v.__composite_values__() - v2 = semver_db.DBVersion(l, prerelease, build) - self.assertEqual(initial, str(v2)) - - def test_major_comparison_as_long(self): - v1 = semver_db.parse("1.1.100") - v2 = semver_db.parse("2.0.0") - self.assertTrue(v2.__composite_values__()[0] > - v1.__composite_values__()[0]) - - def test_minor_comparison_as_long(self): - v1 = semver_db.parse("1.1.100") - v2 = semver_db.parse("1.2.0") - self.assertTrue(v2.__composite_values__()[0] > - v1.__composite_values__()[0]) - - def test_patch_comparison_as_long(self): - v1 = semver_db.parse("1.1.1") - v2 = semver_db.parse("1.1.100") - self.assertTrue(v2.__composite_values__()[0] > - v1.__composite_values__()[0]) - - def test_label_comparison_as_long(self): - v1 = semver_db.parse("1.1.1-alpha") - v2 = semver_db.parse("1.1.1") - self.assertTrue(v2.__composite_values__()[0] > - v1.__composite_values__()[0]) - - def test_label_comparison_as_string(self): - versions = [ - semver_db.parse("1.1.1-0.10.a.23.y.255").__composite_values__()[1], - semver_db.parse("1.1.1-0.10.z.23.x.255").__composite_values__()[1], - semver_db.parse("1.1.1-0.10.z.23.y.255").__composite_values__()[1], - semver_db.parse("1.1.1-0.10.z.23.y.256").__composite_values__()[1], - semver_db.parse("1.1.1-0.10.z.24.y.255").__composite_values__()[1], - semver_db.parse("1.1.1-0.11.z.24.y.255").__composite_values__()[1], - semver_db.parse("1.1.1-1.11.z.24.y.255").__composite_values__()[1], - semver_db.parse("1.1.1-alp.1.2.3.4.5.6").__composite_values__()[1]] - for i in range(len(versions) - 1): - self.assertLess(versions[i], versions[i + 1]) - - def test_too_large_version(self): - version1 = '1.1.65536' - version2 = '1.65536.1' - version3 = '65536.1.1' - self.assertRaises(exception.InvalidVersion, semver_db.parse, version1) - self.assertRaises(exception.InvalidVersion, semver_db.parse, version2) - self.assertRaises(exception.InvalidVersion, semver_db.parse, version3) - - def test_too_long_numeric_segments(self): - version = semver_db.parse('1.0.0-alpha.1234567') - self.assertRaises(exception.InvalidVersion, - version.__composite_values__) diff --git a/glance/tests/unit/test_domain.py b/glance/tests/unit/test_domain.py index 6cd1b85c71..5ab5cf79d5 100644 --- a/glance/tests/unit/test_domain.py +++ b/glance/tests/unit/test_domain.py @@ -24,10 +24,8 @@ import oslo_utils.importutils import glance.async from glance.async import taskflow_executor from glance.common import exception -from glance.common.glare import definitions from glance.common import timeutils from glance import domain -from glance.glare import domain as artifacts_domain import glance.tests.utils as test_utils @@ -575,22 +573,3 @@ class TestTaskExecutorFactory(test_utils.BaseTestCase): # NOTE(flaper87): "eventlet" executor. short name to avoid > 79.
te_evnt = task_executor_factory.new_task_executor(context) self.assertIsInstance(te_evnt, taskflow_executor.TaskExecutor) - - -class TestArtifact(definitions.ArtifactType): - prop1 = definitions.Dict() - prop2 = definitions.Integer(min_value=10) - - -class TestArtifactTypeFactory(test_utils.BaseTestCase): - - def setUp(self): - super(TestArtifactTypeFactory, self).setUp() - context = mock.Mock(owner='me') - self.factory = artifacts_domain.ArtifactFactory(context, TestArtifact) - - def test_new_artifact_min_params(self): - artifact = self.factory.new_artifact("foo", "1.0.0-alpha") - self.assertEqual('creating', artifact.state) - self.assertEqual('me', artifact.owner) - self.assertIsNotNone(artifact.id) diff --git a/glance/tests/unit/test_glare_plugin_loader.py b/glance/tests/unit/test_glare_plugin_loader.py deleted file mode 100644 index 51d192b1f5..0000000000 --- a/glance/tests/unit/test_glare_plugin_loader.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from stevedore import extension - -from glance.common import exception -from glance.common.glare import loader -from glance.contrib.plugins.artifacts_sample.v1 import artifact as art1 -from glance.contrib.plugins.artifacts_sample.v2 import artifact as art2 -from glance.tests import utils - - -class MyArtifactDuplicate(art1.MyArtifact): - __type_version__ = '1.0.1' - __type_name__ = 'MyArtifact' - - -class MyArtifactOk(art1.MyArtifact): - __type_version__ = '1.0.2' - __type_name__ = 'MyArtifact' - - -class TestArtifactsLoader(utils.BaseTestCase): - def setUp(self): - self._setup_loader([('MyArtifact', art1.MyArtifact)]) - super(TestArtifactsLoader, self).setUp() - - def _setup_loader(self, artifacts): - self.loader = None - self.extensions = [ - extension.Extension( - name=a[0], - entry_point=mock.Mock(), - plugin=a[1], - obj=None, - ) - for a in artifacts - ] - test_plugins = extension.ExtensionManager.make_test_instance( - extensions=self.extensions, - propagate_map_exceptions=True, - ) - self.loader = loader.ArtifactsPluginLoader( - 'glance.artifacts.types', - test_plugins=test_plugins, - ) - - def test_load(self): - """ - Plugins can be loaded as entrypoint=single plugin and - entrypoint=[a, list, of, plugins] - """ - # single version - self.assertEqual(1, len(self.loader.mgr.extensions)) - self.assertEqual(art1.MyArtifact, - self.loader.get_class_by_endpoint('myartifact')) - # entrypoint = [a, list] - self._setup_loader([ - ('MyArtifact', MyArtifactOk), - ('MyArtifact', art2.MyArtifact), - ('MyArtifact', art1.MyArtifact), - ]) - self.assertEqual(3, len(self.loader.mgr.extensions)) - # returns the plugin with the latest version - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_endpoint('myartifact')) - self.assertEqual(art1.MyArtifact, - self.loader.get_class_by_endpoint('myartifact', - '1.0.1')) - - def test_basic_loader_func(self): - """Test public methods of PluginLoader class here""" - # type_version 2 == 2.0 == 2.0.0 - 
self._setup_loader([('MyArtifact', art2.MyArtifact)]) - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_endpoint('myartifact')) - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_endpoint('myartifact', - '2.0')) - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_endpoint('myartifact', - '2.0.0')) - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_endpoint('myartifact', - '2')) - # now make sure that get_class_by_typename works as well - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_typename('MyArtifact')) - self.assertEqual(art2.MyArtifact, - self.loader.get_class_by_typename('MyArtifact', '2')) - - def test_config_validation(self): - """ - Plugins can be loaded on certain conditions: - * entry point name == type_name - * no plugin with the same type_name and version has already been - loaded - """ - # here artifacts specific validation is checked - self.assertRaises(exception.ArtifactNonMatchingTypeName, - self._setup_loader, - [('non_matching_name', art1.MyArtifact)], - ) - # make sure this call is ok - self._setup_loader([('MyArtifact', art1.MyArtifact)]) - art_type = self.loader.get_class_by_endpoint('myartifact') - self.assertEqual('MyArtifact', art_type.metadata.type_name) - self.assertEqual('1.0.1', art_type.metadata.type_version) - # now try to add duplicate artifact with the same type_name and - # type_version as already exists - self.assertEqual(art_type.metadata.type_version, - MyArtifactDuplicate.metadata.type_version) - self.assertEqual(art_type.metadata.type_name, - MyArtifactDuplicate.metadata.type_name) - # should raise an exception as (name, version) is not unique - self.assertRaises( - exception.ArtifactDuplicateNameTypeVersion, self._setup_loader, - [('MyArtifact', art1.MyArtifact), - ('MyArtifact', MyArtifactDuplicate)]) - # two artifacts with the same name but different versions coexist fine - self.assertEqual('MyArtifact', MyArtifactOk.metadata.type_name) - self.assertNotEqual(art_type.metadata.type_version, - MyArtifactOk.metadata.type_version) - self._setup_loader([('MyArtifact', art1.MyArtifact), - ('MyArtifact', MyArtifactOk)]) - - def test_check_function(self): - """ - A test to show that plugin-load specific options in artifacts.conf - are correctly processed: - - * no plugins can be loaded if load_enabled = False - * if available_plugins list is given, only the plugins specified can - be loaded - - """ - self.config(load_enabled=False) - self._setup_loader([('MyArtifact', art1.MyArtifact)]) - checker = self.loader._gen_check_func() - self.assertRaises( - exception.ArtifactLoadError, - checker, - self.extensions[0], - ) - self.config(load_enabled=True, available_plugins=['MyArtifact-1.0.2']) - self._setup_loader([('MyArtifact', art1.MyArtifact)]) - checker = self.loader._gen_check_func() - self.assertRaises( - exception.ArtifactLoadError, - checker, - self.extensions[0], - ) - self._setup_loader([('MyArtifact', MyArtifactOk)]) - # make sure that plugin_map has the expected plugin - self.assertEqual(MyArtifactOk, - self.loader.get_class_by_endpoint('myartifact', - '1.0.2')) diff --git a/glance/tests/unit/test_glare_type_definition_framework.py b/glance/tests/unit/test_glare_type_definition_framework.py deleted file mode 100644 index b65be00a1b..0000000000 --- a/glance/tests/unit/test_glare_type_definition_framework.py +++ /dev/null @@ -1,1128 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import functools - -import mock -import six - -import glance.common.exception as exc -from glance.common.glare import declarative -import glance.common.glare.definitions as defs -from glance.common.glare import serialization -import glance.tests.utils as test_utils - - -BASE = declarative.get_declarative_base() - - -class TestDeclarativeProperties(test_utils.BaseTestCase): - def test_artifact_type_properties(self): - class SomeTypeWithNoExplicitName(BASE): - some_attr = declarative.AttributeDefinition() - - class InheritedType(SomeTypeWithNoExplicitName): - __type_version__ = '1.0' - __type_name__ = 'ExplicitName' - __type_description__ = 'Type description' - __type_display_name__ = 'EXPLICIT_NAME' - __endpoint__ = 'some_endpoint' - - some_attr = declarative.AttributeDefinition(display_name='NAME') - - base_type = SomeTypeWithNoExplicitName - base_instance = SomeTypeWithNoExplicitName() - self.assertIsNotNone(base_type.metadata) - self.assertIsNotNone(base_instance.metadata) - self.assertEqual(base_type.metadata, base_instance.metadata) - self.assertEqual("SomeTypeWithNoExplicitName", - base_type.metadata.type_name) - self.assertEqual("SomeTypeWithNoExplicitName", - base_type.metadata.type_display_name) - self.assertEqual("1.0", base_type.metadata.type_version) - self.assertIsNone(base_type.metadata.type_description) - self.assertEqual('sometypewithnoexplicitname', - base_type.metadata.endpoint) - - self.assertIsNone(base_instance.some_attr) - self.assertIsNotNone(base_type.some_attr) - self.assertEqual(base_type.some_attr, - base_instance.metadata.attributes.all['some_attr']) - self.assertEqual('some_attr', base_type.some_attr.name) - self.assertEqual('some_attr', base_type.some_attr.display_name) - self.assertIsNone(base_type.some_attr.description) - - derived_type = InheritedType - derived_instance = InheritedType() - - self.assertIsNotNone(derived_type.metadata) - self.assertIsNotNone(derived_instance.metadata) - self.assertEqual(derived_type.metadata, derived_instance.metadata) - self.assertEqual('ExplicitName', derived_type.metadata.type_name) - self.assertEqual('EXPLICIT_NAME', - derived_type.metadata.type_display_name) - self.assertEqual('1.0', derived_type.metadata.type_version) - self.assertEqual('Type description', - derived_type.metadata.type_description) - self.assertEqual('some_endpoint', derived_type.metadata.endpoint) - self.assertIsNone(derived_instance.some_attr) - self.assertIsNotNone(derived_type.some_attr) - self.assertEqual(derived_type.some_attr, - derived_instance.metadata.attributes.all['some_attr']) - self.assertEqual('some_attr', derived_type.some_attr.name) - self.assertEqual('NAME', derived_type.some_attr.display_name) - - def test_wrong_type_definition(self): - def declare_wrong_type_version(): - class WrongType(BASE): - __type_version__ = 'abc' # not a semver - - return WrongType - - def declare_wrong_type_name(): - class WrongType(BASE): - __type_name__ = 'a' * 256 # too long - - return 
WrongType - - self.assertRaises(exc.InvalidArtifactTypeDefinition, - declare_wrong_type_version) - self.assertRaises(exc.InvalidArtifactTypeDefinition, - declare_wrong_type_name) - - def test_base_declarative_attributes(self): - class TestType(BASE): - defaulted = declarative.PropertyDefinition(default=42) - read_only = declarative.PropertyDefinition(readonly=True) - required_attr = declarative.PropertyDefinition(required=True) - - e = self.assertRaises(exc.InvalidArtifactPropertyValue, TestType) - self.assertEqual('required_attr', e.name) - self.assertIsNone(e.value) - tt = TestType(required_attr="universe") - self.assertEqual('universe', tt.required_attr) - self.assertEqual(42, tt.defaulted) - self.assertIsNone(tt.read_only) - - tt = TestType(required_attr="universe", defaulted=0, read_only="Hello") - self.assertEqual(0, tt.defaulted) - self.assertEqual("Hello", tt.read_only) - - tt.defaulted = 5 - self.assertEqual(5, tt.defaulted) - tt.required_attr = 'Foo' - self.assertEqual('Foo', tt.required_attr) - - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'read_only', 'some_val') - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'required_attr', None) - - # no type checks in base AttributeDefinition - o = object() - tt.required_attr = o - self.assertEqual(o, tt.required_attr) - - def test_generic_property(self): - class TestType(BASE): - simple_prop = declarative.PropertyDefinition() - immutable_internal = declarative.PropertyDefinition(mutable=False, - internal=True) - prop_with_allowed = declarative.PropertyDefinition( - allowed_values=["Foo", True, 42]) - - class DerivedType(TestType): - prop_with_allowed = declarative.PropertyDefinition( - allowed_values=["Foo", True, 42], required=True, default=42) - - tt = TestType() - self.assertEqual(True, - tt.metadata.attributes.all['simple_prop'].mutable) - self.assertEqual(False, - tt.metadata.attributes.all['simple_prop'].internal) - self.assertEqual(False, - tt.metadata.attributes.all[ - 'immutable_internal'].mutable) - self.assertEqual(True, - tt.metadata.attributes.all[ - 'immutable_internal'].internal) - self.assertIsNone(tt.prop_with_allowed) - tt = TestType(prop_with_allowed=42) - self.assertEqual(42, tt.prop_with_allowed) - tt = TestType(prop_with_allowed=True) - self.assertEqual(True, tt.prop_with_allowed) - tt = TestType(prop_with_allowed='Foo') - self.assertEqual('Foo', tt.prop_with_allowed) - - tt.prop_with_allowed = 42 - self.assertEqual(42, tt.prop_with_allowed) - tt.prop_with_allowed = 'Foo' - self.assertEqual('Foo', tt.prop_with_allowed) - tt.prop_with_allowed = True - self.assertEqual(True, tt.prop_with_allowed) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, - tt, 'prop_with_allowed', 'bar') - # ensure that wrong assignment didn't change the value - self.assertEqual(True, tt.prop_with_allowed) - self.assertRaises(exc.InvalidArtifactPropertyValue, TestType, - prop_with_allowed=False) - - dt = DerivedType() - self.assertEqual(42, dt.prop_with_allowed) - - def test_default_violates_allowed(self): - def declare_wrong_type(): - class WrongType(BASE): - prop = declarative.PropertyDefinition( - allowed_values=['foo', 'bar'], - default='baz') - - return WrongType - - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_type) - - def test_string_property(self): - class TestType(BASE): - simple = defs.String() - with_length = defs.String(max_length=10, min_length=5) - with_pattern = defs.String(pattern='^\\d+$', default='42') - - tt = TestType() - tt.simple = 
'foo' - self.assertEqual('foo', tt.simple) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, - tt, 'simple', 42) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, - tt, 'simple', 'x' * 256) - self.assertRaises(exc.InvalidArtifactPropertyValue, TestType, - simple='x' * 256) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, - tt, 'with_length', 'x' * 11) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, - tt, 'with_length', 'x' * 4) - tt.simple = 'x' * 5 - self.assertEqual('x' * 5, tt.simple) - tt.simple = 'x' * 10 - self.assertEqual('x' * 10, tt.simple) - - self.assertEqual("42", tt.with_pattern) - tt.with_pattern = '0' - self.assertEqual('0', tt.with_pattern) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'with_pattern', 'abc') - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'with_pattern', '.123.') - - def test_binary_object_mutable(self): - def declare_blob(mutable): - class BLOB(BASE): - prop = defs.BinaryObject(mutable=mutable) - - return BLOB - - blob = declare_blob(False)() - self.assertFalse(blob.metadata.attributes.all['prop'].mutable) - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - functools.partial(declare_blob, True)) - - def test_default_and_allowed_violates_string_constrains(self): - def declare_wrong_default(): - class WrongType(BASE): - prop = defs.String(min_length=4, default='foo') - - return WrongType - - def declare_wrong_allowed(): - class WrongType(BASE): - prop = defs.String(min_length=4, allowed_values=['foo', 'bar']) - - return WrongType - - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_default) - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_allowed) - - def test_integer_property(self): - class TestType(BASE): - simple = defs.Integer() - constrained = defs.Integer(min_value=10, max_value=50) - - tt = TestType() - self.assertIsNone(tt.simple) - self.assertIsNone(tt.constrained) - - tt.simple = 0 - tt.constrained = 10 - self.assertEqual(0, tt.simple) - self.assertEqual(10, tt.constrained) - - tt.constrained = 50 - self.assertEqual(50, tt.constrained) - - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'constrained', 1) - self.assertEqual(50, tt.constrained) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'constrained', 51) - self.assertEqual(50, tt.constrained) - - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'simple', '11') - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'simple', 10.5) - - def test_default_and_allowed_violates_int_constrains(self): - def declare_wrong_default(): - class WrongType(BASE): - prop = defs.Integer(min_value=4, default=1) - - return WrongType - - def declare_wrong_allowed(): - class WrongType(BASE): - prop = defs.Integer(min_value=4, max_value=10, - allowed_values=[1, 15]) - - return WrongType - - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_default) - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_allowed) - - def test_numeric_values(self): - class TestType(BASE): - simple = defs.Numeric() - constrained = defs.Numeric(min_value=3.14, max_value=4.1) - - tt = TestType(simple=0.1, constrained=4) - self.assertEqual(0.1, tt.simple) - self.assertEqual(4.0, tt.constrained) - - tt.simple = 1 - self.assertEqual(1, tt.simple) - tt.constrained = 3.14 - self.assertEqual(3.14, tt.constrained) - tt.constrained = 4.1 - 
self.assertEqual(4.1, tt.constrained) - - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'simple', 'qwerty') - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'constrained', 3) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - 'constrained', 5) - - def test_default_and_allowed_violates_numeric_constrains(self): - def declare_wrong_default(): - class WrongType(BASE): - prop = defs.Numeric(min_value=4.0, default=1.1) - - return WrongType - - def declare_wrong_allowed(): - class WrongType(BASE): - prop = defs.Numeric(min_value=4.0, max_value=10.0, - allowed_values=[1.0, 15.5]) - - return WrongType - - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_default) - self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, - declare_wrong_allowed) - - def test_same_item_type_array(self): - class TestType(BASE): - simple = defs.Array() - unique = defs.Array(unique=True) - simple_with_allowed_values = defs.Array( - defs.String(allowed_values=["Foo", "Bar"])) - defaulted = defs.Array(defs.Boolean(), default=[True, False]) - constrained = defs.Array(item_type=defs.Numeric(min_value=0), - min_size=3, max_size=5, unique=True) - - tt = TestType(simple=[]) - self.assertEqual([], tt.simple) - tt.simple.append("Foo") - self.assertEqual(["Foo"], tt.simple) - tt.simple.append("Foo") - self.assertEqual(["Foo", "Foo"], tt.simple) - self.assertEqual(2, len(tt.simple)) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.simple.append, - 42) - tt.simple.pop(1) - self.assertEqual(["Foo"], tt.simple) - del tt.simple[0] - self.assertEqual(0, len(tt.simple)) - - tt.simple_with_allowed_values = ["Foo"] - tt.simple_with_allowed_values.insert(0, "Bar") - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.simple_with_allowed_values.append, "Baz") - - self.assertEqual([True, False], tt.defaulted) - tt.defaulted.pop() - self.assertEqual([True], tt.defaulted) - tt2 = TestType() - self.assertEqual([True, False], tt2.defaulted) - - self.assertIsNone(tt.constrained) - tt.constrained = [10, 5, 4] - self.assertEqual([10, 5, 4], tt.constrained) - tt.constrained[1] = 15 - self.assertEqual([10, 15, 4], tt.constrained) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained.__setitem__, 1, -5) - self.assertEqual([10, 15, 4], tt.constrained) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained.remove, 15) - self.assertEqual([10, 15, 4], tt.constrained) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained.__delitem__, 1) - self.assertEqual([10, 15, 4], tt.constrained) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained.append, 15) - self.assertEqual([10, 15, 4], tt.constrained) - - tt.unique = [] - tt.unique.append("foo") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.unique.append, - "foo") - - def test_tuple_style_array(self): - class TestType(BASE): - address = defs.Array( - item_type=[defs.String(20), defs.Integer(min_value=1), - defs.Boolean()]) - - tt = TestType(address=["Hope Street", 1234, True]) - self.assertEqual("Hope Street", tt.address[0]) - self.assertEqual(1234, tt.address[1]) - self.assertEqual(True, tt.address[2]) - - # On Python 3, sort() fails because int (1) and string ("20") are not - # comparable - if six.PY2: - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.address.sort) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop, 0) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop, 1) - 
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.append, - "Foo") - - def test_same_item_type_dict(self): - class TestType(BASE): - simple_props = defs.Dict() - constrained_props = defs.Dict( - properties=defs.Integer(min_value=1, allowed_values=[1, 2]), - min_properties=2, - max_properties=3) - - tt = TestType() - self.assertIsNone(tt.simple_props) - self.assertIsNone(tt.constrained_props) - tt.simple_props = {} - self.assertEqual({}, tt.simple_props) - tt.simple_props["foo"] = "bar" - self.assertEqual({"foo": "bar"}, tt.simple_props) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.simple_props.__setitem__, 42, "foo") - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.simple_props.setdefault, "bar", 42) - - tt.constrained_props = {"foo": 1, "bar": 2} - self.assertEqual({"foo": 1, "bar": 2}, tt.constrained_props) - tt.constrained_props["baz"] = 1 - self.assertEqual({"foo": 1, "bar": 2, "baz": 1}, tt.constrained_props) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained_props.__setitem__, "foo", 3) - self.assertEqual(1, tt.constrained_props["foo"]) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained_props.__setitem__, "qux", 2) - tt.constrained_props.pop("foo") - self.assertEqual({"bar": 2, "baz": 1}, tt.constrained_props) - tt.constrained_props['qux'] = 2 - self.assertEqual({"qux": 2, "bar": 2, "baz": 1}, tt.constrained_props) - tt.constrained_props.popitem() - dict_copy = tt.constrained_props.copy() - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.constrained_props.popitem) - self.assertEqual(dict_copy, tt.constrained_props) - - def test_composite_dict(self): - class TestType(BASE): - props = defs.Dict(properties={"foo": defs.String(), - "bar": defs.Boolean()}) - fixed = defs.Dict(properties={"name": defs.String(min_length=2), - "age": defs.Integer(min_value=0, - max_value=99)}) - - tt = TestType() - tt.props = {"foo": "FOO", "bar": False} - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.props.__setitem__, "bar", 123) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.props.__setitem__, "extra", "value") - tt.fixed = {"name": "Alex", "age": 42} - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.fixed.__setitem__, "age", 120) - - def test_immutables(self): - class TestType(BASE): - activated = defs.Boolean(required=True, default=False) - name = defs.String(mutable=False) - - def __is_mutable__(self): - return not self.activated - - tt = TestType() - self.assertEqual(False, tt.activated) - self.assertIsNone(tt.name) - tt.name = "Foo" - self.assertEqual("Foo", tt.name) - tt.activated = True - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, - tt, "name", "Bar") - self.assertEqual("Foo", tt.name) - tt.activated = False - tt.name = "Bar" - self.assertEqual("Bar", tt.name) - - def test_readonly_array_dict(self): - class TestType(BASE): - arr = defs.Array(readonly=True) - dict = defs.Dict(readonly=True) - - tt = TestType(arr=["Foo", "Bar"], dict={"qux": "baz"}) - self.assertEqual(["Foo", "Bar"], tt.arr) - self.assertEqual({"qux": "baz"}, tt.dict) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, - "Baz") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.insert, - 0, "Baz") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.__setitem__, - 0, "Baz") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.remove, - "Foo") - 
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.pop) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.pop, - "qux") - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.dict.__setitem__, "qux", "foo") - - def test_mutable_array_dict(self): - class TestType(BASE): - arr = defs.Array(mutable=False) - dict = defs.Dict(mutable=False) - activated = defs.Boolean() - - def __is_mutable__(self): - return not self.activated - - tt = TestType() - tt.arr = [] - tt.dict = {} - tt.arr.append("Foo") - tt.arr.insert(0, "Bar") - tt.dict["baz"] = "qux" - tt.activated = True - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, - "Baz") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.insert, - 0, "Baz") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.__setitem__, - 0, "Baz") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.remove, - "Foo") - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.pop) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.pop, - "qux") - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.dict.__setitem__, "qux", "foo") - - def test_readonly_as_write_once(self): - class TestType(BASE): - prop = defs.String(readonly=True) - arr = defs.Array(readonly=True) - - tt = TestType() - self.assertIsNone(tt.prop) - tt.prop = "Foo" - self.assertEqual("Foo", tt.prop) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, - "prop", "bar") - tt2 = TestType() - self.assertIsNone(tt2.prop) - tt2.prop = None - self.assertIsNone(tt2.prop) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt2, - "prop", None) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt2, - "prop", "foo") - self.assertIsNone(tt.arr) - tt.arr = ["foo", "bar"] - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, - 'baz') - self.assertIsNone(tt2.arr) - tt2.arr = None - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, - 'baz') - - -class TestArtifactType(test_utils.BaseTestCase): - def test_create_artifact(self): - a = defs.ArtifactType(**get_artifact_fixture()) - self.assertIsNotNone(a) - self.assertEqual("123", a.id) - self.assertEqual("ArtifactType", a.type_name) - self.assertEqual("1.0", a.type_version) - self.assertEqual("11.2", a.version) - self.assertEqual("Foo", a.name) - self.assertEqual("private", a.visibility) - self.assertEqual("creating", a.state) - self.assertEqual("my_tenant", a.owner) - self.assertEqual(a.created_at, a.updated_at) - self.assertIsNone(a.description) - self.assertIsNone(a.published_at) - self.assertIsNone(a.deleted_at) - - self.assertIsNone(a.description) - - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "id", - "foo") - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "state", "active") - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "owner", "some other") - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "created_at", datetime.datetime.now()) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "deleted_at", datetime.datetime.now()) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "updated_at", datetime.datetime.now()) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "published_at", datetime.datetime.now()) - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, - "visibility", "wrong") - - def test_dependency_prop(self): - class DerivedType(defs.ArtifactType): - depends_on_any = 
defs.ArtifactReference() - depends_on_self = defs.ArtifactReference(type_name='DerivedType') - depends_on_self_version = defs.ArtifactReference( - type_name='DerivedType', - type_version='1.0') - - class DerivedTypeV11(DerivedType): - __type_name__ = 'DerivedType' - __type_version__ = '1.1' - depends_on_self_version = defs.ArtifactReference( - type_name='DerivedType', - type_version='1.1') - - d1 = DerivedType(**get_artifact_fixture()) - d2 = DerivedTypeV11(**get_artifact_fixture()) - a = defs.ArtifactType(**get_artifact_fixture()) - d1.depends_on_any = a - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, d1, - 'depends_on_self', a) - d1.depends_on_self = d2 - d2.depends_on_self = d1 - d1.depends_on_self_version = d1 - d2.depends_on_self_version = d2 - self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, d1, - 'depends_on_self_version', d2) - - def test_dependency_list(self): - class FooType(defs.ArtifactType): - pass - - class BarType(defs.ArtifactType): - pass - - class TestType(defs.ArtifactType): - depends_on = defs.ArtifactReferenceList() - depends_on_self_or_foo = defs.ArtifactReferenceList( - references=defs.ArtifactReference(['FooType', 'TestType'])) - - a = defs.ArtifactType(**get_artifact_fixture(id="1")) - a_copy = defs.ArtifactType(**get_artifact_fixture(id="1")) - b = defs.ArtifactType(**get_artifact_fixture(id="2")) - - tt = TestType(**get_artifact_fixture(id="3")) - foo = FooType(**get_artifact_fixture(id='4')) - bar = BarType(**get_artifact_fixture(id='4')) - - tt.depends_on.append(a) - tt.depends_on.append(b) - self.assertEqual([a, b], tt.depends_on) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.depends_on.append, a) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.depends_on.append, a_copy) - - tt.depends_on_self_or_foo.append(tt) - tt.depends_on_self_or_foo.append(foo) - self.assertRaises(exc.InvalidArtifactPropertyValue, - tt.depends_on_self_or_foo.append, bar) - self.assertEqual([tt, foo], tt.depends_on_self_or_foo) - - def test_blob(self): - class TestType(defs.ArtifactType): - image_file = defs.BinaryObject(max_file_size=201054, - min_locations=1, - max_locations=5) - screen_shots = defs.BinaryObjectList( - objects=defs.BinaryObject(min_file_size=100), min_count=1) - - tt = TestType(**get_artifact_fixture()) - blob = defs.Blob() - blob.size = 1024 - blob.locations.append("file://some.file.path") - tt.image_file = blob - - self.assertEqual(1024, tt.image_file.size) - self.assertEqual(["file://some.file.path"], tt.image_file.locations) - - def test_pre_publish_blob_validation(self): - class TestType(defs.ArtifactType): - required_blob = defs.BinaryObject(required=True) - optional_blob = defs.BinaryObject() - - tt = TestType(**get_artifact_fixture()) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.__pre_publish__) - tt.required_blob = defs.Blob(size=0) - tt.__pre_publish__() - - def test_pre_publish_dependency_validation(self): - class TestType(defs.ArtifactType): - required_dependency = defs.ArtifactReference(required=True) - optional_dependency = defs.ArtifactReference() - - tt = TestType(**get_artifact_fixture()) - self.assertRaises(exc.InvalidArtifactPropertyValue, tt.__pre_publish__) - tt.required_dependency = defs.ArtifactType(**get_artifact_fixture()) - tt.__pre_publish__() - - def test_default_value_of_immutable_field_in_active_state(self): - class TestType(defs.ArtifactType): - foo = defs.String(default='Bar', mutable=False) - tt = TestType(**get_artifact_fixture(state='active')) - self.assertEqual('Bar', 
tt.foo) - - -class SerTestType(defs.ArtifactType): - some_string = defs.String() - some_text = defs.Text() - some_version = defs.SemVerString() - some_int = defs.Integer() - some_numeric = defs.Numeric() - some_bool = defs.Boolean() - some_array = defs.Array() - another_array = defs.Array( - item_type=[defs.Integer(), defs.Numeric(), defs.Boolean()]) - some_dict = defs.Dict() - another_dict = defs.Dict( - properties={'foo': defs.Integer(), 'bar': defs.Boolean()}) - some_ref = defs.ArtifactReference() - some_ref_list = defs.ArtifactReferenceList() - some_blob = defs.BinaryObject() - some_blob_list = defs.BinaryObjectList() - - -class TestSerialization(test_utils.BaseTestCase): - def test_serialization_to_db(self): - ref1 = defs.ArtifactType(**get_artifact_fixture(id="1")) - ref2 = defs.ArtifactType(**get_artifact_fixture(id="2")) - ref3 = defs.ArtifactType(**get_artifact_fixture(id="3")) - - blob1 = defs.Blob(size=100, locations=['http://example.com/blob1'], - item_key='some_key', checksum='abc') - blob2 = defs.Blob(size=200, locations=['http://example.com/blob2'], - item_key='another_key', checksum='fff') - blob3 = defs.Blob(size=300, locations=['http://example.com/blob3'], - item_key='third_key', checksum='123') - - fixture = get_artifact_fixture() - tt = SerTestType(**fixture) - tt.some_string = 'bar' - tt.some_text = 'bazz' - tt.some_version = '11.22.33-beta' - tt.some_int = 50 - tt.some_numeric = 10.341 - tt.some_bool = True - tt.some_array = ['q', 'w', 'e', 'r', 't', 'y'] - tt.another_array = [1, 1.2, False] - tt.some_dict = {'foobar': "FOOBAR", 'baz': "QUX"} - tt.another_dict = {'foo': 1, 'bar': True} - tt.some_ref = ref1 - tt.some_ref_list = [ref2, ref3] - tt.some_blob = blob1 - tt.some_blob_list = [blob2, blob3] - - results = serialization.serialize_for_db(tt) - expected = fixture - expected['type_name'] = 'SerTestType' - expected['type_version'] = '1.0' - expected['properties'] = { - 'some_string': { - 'type': 'string', - 'value': 'bar' - }, - 'some_text': { - 'type': 'text', - 'value': 'bazz' - }, - 'some_version': { - 'type': 'string', - 'value': '11.22.33-beta' - }, - 'some_int': { - 'type': 'int', - 'value': 50 - }, - 'some_numeric': { - 'type': 'numeric', - 'value': 10.341 - }, - 'some_bool': { - 'type': 'bool', - 'value': True - }, - 'some_array': { - 'type': 'array', - 'value': [ - { - 'type': 'string', - 'value': 'q' - }, - { - 'type': 'string', - 'value': 'w' - }, - { - 'type': 'string', - 'value': 'e' - }, - { - 'type': 'string', - 'value': 'r' - }, - { - 'type': 'string', - 'value': 't' - }, - { - 'type': 'string', - 'value': 'y' - } - ] - }, - 'another_array': { - 'type': 'array', - 'value': [ - { - 'type': 'int', - 'value': 1 - }, - { - 'type': 'numeric', - 'value': 1.2 - }, - { - 'type': 'bool', - 'value': False - } - ] - }, - 'some_dict.foobar': { - 'type': 'string', - 'value': 'FOOBAR' - }, - 'some_dict.baz': { - 'type': 'string', - 'value': 'QUX' - }, - 'another_dict.foo': { - 'type': 'int', - 'value': 1 - }, - 'another_dict.bar': { - 'type': 'bool', - 'value': True - } - } - expected['dependencies'] = { - 'some_ref': ['1'], - 'some_ref_list': ['2', '3'] - } - expected['blobs'] = { - 'some_blob': [ - { - 'size': 100, - 'checksum': 'abc', - 'item_key': 'some_key', - 'locations': ['http://example.com/blob1'] - }], - 'some_blob_list': [ - { - 'size': 200, - 'checksum': 'fff', - 'item_key': 'another_key', - 'locations': ['http://example.com/blob2'] - }, - { - 'size': 300, - 'checksum': '123', - 'item_key': 'third_key', - 'locations': ['http://example.com/blob3'] - } - 
] - } - - self.assertEqual(expected, results) - - def test_deserialize_from_db(self): - ts = datetime.datetime.now() - db_dict = { - "type_name": 'SerTestType', - "type_version": '1.0', - "id": "123", - "version": "11.2", - "description": None, - "name": "Foo", - "visibility": "private", - "state": "creating", - "owner": "my_tenant", - "created_at": ts, - "updated_at": ts, - "deleted_at": None, - "published_at": None, - "tags": ["test", "fixture"], - "properties": { - 'some_string': { - 'type': 'string', - 'value': 'bar' - }, - 'some_text': { - 'type': 'text', - 'value': 'bazz' - }, - 'some_version': { - 'type': 'string', - 'value': '11.22.33-beta' - }, - 'some_int': { - 'type': 'int', - 'value': 50 - }, - 'some_numeric': { - 'type': 'numeric', - 'value': 10.341 - }, - 'some_bool': { - 'type': 'bool', - 'value': True - }, - 'some_array': { - 'type': 'array', - 'value': [ - { - 'type': 'string', - 'value': 'q' - }, - { - 'type': 'string', - 'value': 'w' - }, - { - 'type': 'string', - 'value': 'e' - }, - { - 'type': 'string', - 'value': 'r' - }, - { - 'type': 'string', - 'value': 't' - }, - { - 'type': 'string', - 'value': 'y' - } - ] - }, - 'another_array': { - 'type': 'array', - 'value': [ - { - 'type': 'int', - 'value': 1 - }, - { - 'type': 'numeric', - 'value': 1.2 - }, - { - 'type': 'bool', - 'value': False - } - ] - }, - 'some_dict.foobar': { - 'type': 'string', - 'value': 'FOOBAR' - }, - 'some_dict.baz': { - 'type': 'string', - 'value': 'QUX' - }, - 'another_dict.foo': { - 'type': 'int', - 'value': 1 - }, - 'another_dict.bar': { - 'type': 'bool', - 'value': True - } - }, - 'blobs': { - 'some_blob': [ - { - 'size': 100, - 'checksum': 'abc', - 'item_key': 'some_key', - 'locations': ['http://example.com/blob1'] - }], - 'some_blob_list': [ - { - 'size': 200, - 'checksum': 'fff', - 'item_key': 'another_key', - 'locations': ['http://example.com/blob2'] - }, - { - 'size': 300, - 'checksum': '123', - 'item_key': 'third_key', - 'locations': ['http://example.com/blob3'] - } - ] - }, - 'dependencies': { - 'some_ref': [ - { - "type_name": 'ArtifactType', - "type_version": '1.0', - "id": "1", - "version": "11.2", - "description": None, - "name": "Foo", - "visibility": "private", - "state": "creating", - "owner": "my_tenant", - "created_at": ts, - "updated_at": ts, - "deleted_at": None, - "published_at": None, - "tags": ["test", "fixture"], - "properties": {}, - "blobs": {}, - "dependencies": {} - } - ], - 'some_ref_list': [ - { - "type_name": 'ArtifactType', - "type_version": '1.0', - "id": "2", - "version": "11.2", - "description": None, - "name": "Foo", - "visibility": "private", - "state": "creating", - "owner": "my_tenant", - "created_at": ts, - "updated_at": ts, - "deleted_at": None, - "published_at": None, - "tags": ["test", "fixture"], - "properties": {}, - "blobs": {}, - "dependencies": {} - }, - { - "type_name": 'ArtifactType', - "type_version": '1.0', - "id": "3", - "version": "11.2", - "description": None, - "name": "Foo", - "visibility": "private", - "state": "creating", - "owner": "my_tenant", - "created_at": ts, - "updated_at": ts, - "deleted_at": None, - "published_at": None, - "tags": ["test", "fixture"], - "properties": {}, - "blobs": {}, - "dependencies": {} - } - ] - } - } - plugins_dict = {'SerTestType': [SerTestType], - 'ArtifactType': [defs.ArtifactType]} - - def _retrieve_plugin(name, version): - return next((p for p in plugins_dict.get(name, []) - if version and p.version == version), - plugins_dict.get(name, [None])[0]) - plugins = mock.Mock() - 
plugins.get_class_by_typename = _retrieve_plugin - art = serialization.deserialize_from_db(db_dict, plugins) - self.assertEqual('123', art.id) - self.assertEqual('11.2', art.version) - self.assertIsNone(art.description) - self.assertEqual('Foo', art.name) - self.assertEqual('private', art.visibility) - self.assertEqual('private', art.visibility) - - -def get_artifact_fixture(**kwargs): - ts = datetime.datetime.now() - fixture = { - "id": "123", - "version": "11.2", - "description": None, - "name": "Foo", - "visibility": "private", - "state": "creating", - "owner": "my_tenant", - "created_at": ts, - "updated_at": ts, - "deleted_at": None, - "published_at": None, - "tags": ["test", "fixture"] - } - fixture.update(kwargs) - return fixture diff --git a/glance/tests/unit/test_store_glare.py b/glance/tests/unit/test_store_glare.py deleted file mode 100644 index 75410c263e..0000000000 --- a/glance/tests/unit/test_store_glare.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from datetime import datetime - -from glance.common.glare import definitions -import glance.context -from glance.glare.domain import proxy -from glance.glare import location -from glance.tests.unit import utils as unit_test_utils -from glance.tests import utils - - -BASE_URI = 'http://storeurl.com/container' -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '228c6da5-29cd-4d67-9457-ed632e083fc0' - - -class ArtifactStub(definitions.ArtifactType): - file = definitions.BinaryObject() - file_list = definitions.BinaryObjectList() - - -class TestStoreArtifact(utils.BaseTestCase): - def setUp(self): - self.store_api = unit_test_utils.FakeStoreAPI() - self.store_utils = unit_test_utils.FakeStoreUtils(self.store_api) - ts = datetime.now() - self.artifact_stub = ArtifactStub(id=UUID2, state='creating', - created_at=ts, updated_at=ts, - version='1.0', owner='me', - name='foo') - super(TestStoreArtifact, self).setUp() - - def test_set_blob_data(self): - context = glance.context.RequestContext(user=USER1) - helper = proxy.ArtifactHelper(location.ArtifactProxy, - proxy_kwargs={ - 'context': context, - 'store_api': self.store_api, - 'store_utils': self.store_utils - }) - artifact = helper.proxy(self.artifact_stub) - artifact.file = ('YYYY', 4) - self.assertEqual(4, artifact.file.size) - - def test_set_bloblist_data(self): - context = glance.context.RequestContext(user=USER1) - helper = proxy.ArtifactHelper(location.ArtifactProxy, - proxy_kwargs={ - 'context': context, - 'store_api': self.store_api, - 'store_utils': self.store_utils - }) - artifact = helper.proxy(self.artifact_stub) - artifact.file_list.append(('YYYY', 4)) - self.assertEqual(4, artifact.file_list[0].size) diff --git a/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml 
b/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml
new file mode 100644
index 0000000000..7a42599afa
--- /dev/null
+++ b/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml
@@ -0,0 +1,41 @@
+---
+upgrade:
+  - |
+    Code for the OpenStack Artifacts Service (`Glare`_) and its EXPERIMENTAL
+    API has been removed from the Glance codebase, as it was relocated into an
+    independent `Glare`_ project repository during a previous release cycle.
+    The database upgrade for the Glance Pike release drops the Glare tables
+    (named 'artifacts' and 'artifact_*') from the Glance database.
+
+    OpenStack deployments, packagers, and deployment projects which provided
+    Glare should have begun to consume Glare from its own `Glare`_ repository
+    during the Newton and Ocata releases. With the Pike release, it is no
+    longer possible to consume Glare code from the Glance repository.
+
+    .. _`Glare`: https://git.openstack.org/cgit/openstack/glare
+other:
+  - |
+    Code for the OpenStack Artifacts Service (Glare) and its EXPERIMENTAL API
+    has been `removed`_ from the Glance codebase.
+
+    The Artifacts API was an EXPERIMENTAL API that ran on the Glance service
+    endpoint as ``/v3`` in the Liberty release. In the Mitaka release, the
+    Glance ``/v3`` EXPERIMENTAL API was deprecated and the Artifacts Service
+    ran on its own endpoint (completely independent from the Glance service
+    endpoint) as an EXPERIMENTAL API, versioned as ``v0.1``. In both the
+    Liberty and Mitaka releases, Glare ran on code stored in the Glance code
+    repository and used its own tables in the Glance database.
+
+    In the Newton release, the Glare code was relocated into its own `Glare`_
+    project repository. Also in the Newton release, Glare ran an EXPERIMENTAL
+    Artifacts API versioned as ``v1.0`` on its own endpoint and used its own
+    database.
+
+    For the Pike release, the legacy Glare code has been removed from the
+    Glance code repository and the legacy 'artifacts' and 'artifact_*'
+    database tables are dropped from the Glance database. As the Artifacts
+    service API was an EXPERIMENTAL API in Glance and has not used the Glance
+    database since Mitaka, no provision is made for migrating data from the
+    Glance database to the Glare database.
+
+    .. _`removed`: http://specs.openstack.org/openstack/glance-specs/specs/mitaka/implemented/deprecate-v3-api.html
diff --git a/requirements.txt b/requirements.txt
index ce1782f0f7..27839cb772 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -51,9 +51,6 @@ osprofiler>=1.4.0 # Apache-2.0
 
 glance-store>=0.18.0 # Apache-2.0
 
-# Artifact repository
-semantic-version>=2.3.1 # BSD
-
 debtcollector>=1.2.0 # Apache-2.0
 cryptography!=1.3.0,>=1.0 # BSD/Apache-2.0
 cursive>=0.1.1 # Apache-2.0
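
The upgrade note above states that the Pike database upgrade drops the Glare
tables from the Glance database. For a concrete picture of what such a drop
involves, the following is a minimal sketch of an Alembic migration that
removes the 'artifacts' and 'artifact_*' tables. It is illustrative only and
is not the migration shipped with this patch: the revision identifiers and
every table name other than 'artifacts' are assumptions inferred from the
'artifact_*' naming pattern quoted in the release note.

    # Illustrative sketch only; the revision identifiers and all table
    # names except 'artifacts' are assumptions, not Glance's actual
    # migration script.
    from alembic import op

    # Revision identifiers used by Alembic (hypothetical values).
    revision = 'pike01'
    down_revision = 'ocata01'
    branch_labels = None
    depends_on = None


    def upgrade():
        # Drop the child 'artifact_*' tables before the parent 'artifacts'
        # table so that foreign-key constraints referencing the parent are
        # not violated mid-drop.
        for table in ('artifact_blob_locations',
                      'artifact_properties',
                      'artifact_tags',
                      'artifact_dependencies',
                      'artifact_blobs',
                      'artifacts'):
            op.drop_table(table)

Because, as the release note explains, the Artifacts API was EXPERIMENTAL and
has not used the Glance database since Mitaka, a drop of this kind needs no
accompanying data-migration step.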