2010-08-20 00:42:38 +00:00
|
|
|
[DEFAULT]
|
2010-07-12 17:03:45 -05:00
|
|
|
# bind_ip = 0.0.0.0
|
|
|
|
# bind_port = 80
|
2012-11-26 12:39:46 -08:00
|
|
|
# bind_timeout = 30
|
2010-10-13 21:24:30 +00:00
|
|
|
# backlog = 4096
|
2010-08-20 02:19:50 +00:00
|
|
|
# swift_dir = /etc/swift
|
2010-08-20 00:42:38 +00:00
|
|
|
# user = swift
|
2013-10-16 19:28:37 -05:00
|
|
|
|
|
|
|
# Enables exposing configuration settings via HTTP GET /info.
|
|
|
|
# expose_info = true
|
|
|
|
|
|
|
|
# Key to use for admin calls that are HMAC signed. Default is empty,
|
|
|
|
# which will disable admin calls to /info.
|
|
|
|
# admin_key = secret_admin_key
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-10-16 19:28:37 -05:00
|
|
|
# Allows the ability to withhold sections from showing up in the public
|
|
|
|
# calls to /info. The following would cause the sections 'container_quotas'
|
|
|
|
# and 'tempurl' to not be listed. Default is empty, allowing all registered
|
|
|
|
# features to be listed via HTTP GET /info.
|
|
|
|
# disallowed_sections = container_quotas, tempurl
|
|
|
|
|
2013-07-11 17:00:57 -07:00
|
|
|
# Use an integer to override the number of pre-forked processes that will
|
|
|
|
# accept connections. Should default to the number of effective cpu
|
|
|
|
# cores in the system. It's worth noting that individual workers will
|
|
|
|
# use many eventlet co-routines to service multiple concurrent requests.
|
|
|
|
# workers = auto
|
|
|
|
#
|
|
|
|
# Maximum concurrent requests per worker
|
|
|
|
# max_clients = 1024
|
|
|
|
#
|
2012-04-23 16:27:43 -05:00
|
|
|
# Set the following two lines to enable SSL. This is for testing only.
|
2010-07-12 17:03:45 -05:00
|
|
|
# cert_file = /etc/swift/proxy.crt
|
|
|
|
# key_file = /etc/swift/proxy.key
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2011-10-26 21:42:24 +00:00
|
|
|
# expiring_objects_container_divisor = 86400
|
2014-02-04 16:31:47 +05:30
|
|
|
# expiring_objects_account_name = expiring_objects
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2011-01-23 13:18:28 -08:00
|
|
|
# You can specify default log routing here if you want:
|
|
|
|
# log_name = swift
|
|
|
|
# log_facility = LOG_LOCAL0
|
|
|
|
# log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-03-25 11:27:36 -05:00
|
|
|
# This optional suffix (default is empty) would be appended to the swift transaction
|
|
|
|
# id allows one to easily figure out from which cluster that X-Trans-Id belongs to.
|
|
|
|
# This is very useful when one is managing more than one swift cluster.
|
|
|
|
# trans_id_suffix =
|
|
|
|
#
|
2012-10-05 15:56:34 -05:00
|
|
|
# comma separated list of functions to call to setup custom log handlers.
|
|
|
|
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
|
|
|
|
# adapted_logger
|
|
|
|
# log_custom_handlers =
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
Updating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# If set, log_udp_host will override log_address
|
|
|
|
# log_udp_host =
|
|
|
|
# log_udp_port = 514
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
Updating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# You can enable StatsD logging here:
|
Adding StatsD logging to Swift.
Documentation, including a list of metrics reported and their semantics,
is in the Admin Guide in a new section, "Reporting Metrics to StatsD".
An optional "metric prefix" may be configured which will be prepended to
every metric name sent to StatsD.
Here is the rationale for doing a deep integration like this versus only
sending metrics to StatsD in middleware. It's the only way to report
some internal activities of Swift in a real-time manner. So to have one
way of reporting to StatsD and one place/style of configuration, even
some things (like, say, timing of PUT requests into the proxy-server)
which could be logged via middleware are consistently logged the same
way (deep integration via the logger delegate methods).
When log_statsd_host is configured, get_logger() injects a
swift.common.utils.StatsdClient object into the logger as
logger.statsd_client. Then a set of delegate methods on LogAdapter
either pass through to the StatsdClient object or become no-ops. This
allows StatsD logging to look like:
self.logger.increment('some.metric.here')
and do the right thing in all cases and with no messy conditional logic.
I wanted to use the pystatsd module for the StatsD client, but the
version on PyPi is lagging the git repo (and is missing both the prefix
functionality and timing_since() method). So I wrote my
swift.common.utils.StatsdClient. The interface is the same as
pystatsd.Client, but the code was written from scratch. It's pretty
simple, and the tests I added cover it. This also frees Swift from an
optional dependency on the pystatsd module, making this feature easier
to enable.
There's test coverage for the new code and all existing tests continue
to pass.
Refactored out _one_audit_pass() method in swift/account/auditor.py and
swift/container/auditor.py.
Fixed some misc. PEP8 violations.
Misc test cleanups and refactorings (particularly the way "fake logging"
is handled).
Change-Id: Ie968a9ae8771f59ee7591e2ae11999c44bfe33b2
2012-04-01 16:47:08 -07:00
|
|
|
# log_statsd_host = localhost
|
|
|
|
# log_statsd_port = 8125
|
2013-01-19 15:25:27 -08:00
|
|
|
# log_statsd_default_sample_rate = 1.0
|
|
|
|
# log_statsd_sample_rate_factor = 1.0
|
Adding StatsD logging to Swift.
Documentation, including a list of metrics reported and their semantics,
is in the Admin Guide in a new section, "Reporting Metrics to StatsD".
An optional "metric prefix" may be configured which will be prepended to
every metric name sent to StatsD.
Here is the rationale for doing a deep integration like this versus only
sending metrics to StatsD in middleware. It's the only way to report
some internal activities of Swift in a real-time manner. So to have one
way of reporting to StatsD and one place/style of configuration, even
some things (like, say, timing of PUT requests into the proxy-server)
which could be logged via middleware are consistently logged the same
way (deep integration via the logger delegate methods).
When log_statsd_host is configured, get_logger() injects a
swift.common.utils.StatsdClient object into the logger as
logger.statsd_client. Then a set of delegate methods on LogAdapter
either pass through to the StatsdClient object or become no-ops. This
allows StatsD logging to look like:
self.logger.increment('some.metric.here')
and do the right thing in all cases and with no messy conditional logic.
I wanted to use the pystatsd module for the StatsD client, but the
version on PyPi is lagging the git repo (and is missing both the prefix
functionality and timing_since() method). So I wrote my
swift.common.utils.StatsdClient. The interface is the same as
pystatsd.Client, but the code was written from scratch. It's pretty
simple, and the tests I added cover it. This also frees Swift from an
optional dependency on the pystatsd module, making this feature easier
to enable.
There's test coverage for the new code and all existing tests continue
to pass.
Refactored out _one_audit_pass() method in swift/account/auditor.py and
swift/container/auditor.py.
Fixed some misc. PEP8 violations.
Misc test cleanups and refactorings (particularly the way "fake logging"
is handled).
Change-Id: Ie968a9ae8771f59ee7591e2ae11999c44bfe33b2
2012-04-01 16:47:08 -07:00
|
|
|
# log_statsd_metric_prefix =
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-10-11 16:52:26 -05:00
|
|
|
# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
|
|
|
|
# cors_allow_origin =
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-03-25 16:34:43 -07:00
|
|
|
# client_timeout = 60
|
2012-12-06 15:09:53 -06:00
|
|
|
# eventlet_debug = false
|
2010-08-20 00:42:38 +00:00
|
|
|
|
|
|
|
[pipeline:main]
|
2013-11-21 17:31:16 -08:00
|
|
|
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk slo dlo ratelimit tempauth container-quotas account-quotas proxy-logging proxy-server
|
2010-08-20 00:42:38 +00:00
|
|
|
|
2010-09-01 15:56:37 +00:00
|
|
|
[app:proxy-server]
|
2010-08-20 00:42:38 +00:00
|
|
|
use = egg:swift#proxy
|
2011-01-23 13:18:28 -08:00
|
|
|
# You can override the default log routing for this app here:
|
|
|
|
# set log_name = proxy-server
|
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-07-21 12:18:24 +08:00
|
|
|
# log_handoffs = true
|
2010-07-12 17:03:45 -05:00
|
|
|
# recheck_account_existence = 60
|
|
|
|
# recheck_container_existence = 60
|
|
|
|
# object_chunk_size = 8192
|
|
|
|
# client_chunk_size = 8192
|
2014-01-27 15:32:01 -08:00
|
|
|
#
|
|
|
|
# How long the proxy server will wait on responses from the a/c/o servers.
|
2010-07-12 17:03:45 -05:00
|
|
|
# node_timeout = 10
|
2014-01-27 15:32:01 -08:00
|
|
|
#
|
|
|
|
# How long the proxy server will wait for an initial response and to read a
|
|
|
|
# chunk of data from the object servers while serving GET / HEAD requests.
|
|
|
|
# Timeouts from these requests can be recovered from so setting this to
|
|
|
|
# something lower than node_timeout would provide quicker error recovery
|
|
|
|
# while allowing for a longer timeout for non-recoverable requests (PUTs).
|
|
|
|
# Defaults to node_timeout, should be overridden if node_timeout is set to a
|
|
|
|
# high number to prevent client timeouts from firing before the proxy server
|
|
|
|
# has a chance to retry.
|
|
|
|
# recoverable_node_timeout = node_timeout
|
|
|
|
#
|
2010-07-12 17:03:45 -05:00
|
|
|
# conn_timeout = 0.5
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-11-25 18:58:34 +00:00
|
|
|
# How long to wait for requests to finish after a quorum has been established.
|
|
|
|
# post_quorum_timeout = 0.5
|
|
|
|
#
|
2010-07-12 17:03:45 -05:00
|
|
|
# How long without an error before a node's error count is reset. This will
|
|
|
|
# also be how long before a node is reenabled after suppression is triggered.
|
|
|
|
# error_suppression_interval = 60
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2010-07-12 17:03:45 -05:00
|
|
|
# How many errors can accumulate before a node is temporarily ignored.
|
|
|
|
# error_suppression_limit = 10
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2010-11-29 15:19:29 -08:00
|
|
|
# If set to 'true' any authorized user may create and delete accounts; if
|
|
|
|
# 'false' no one, even authorized, can.
|
|
|
|
# allow_account_management = false
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2011-06-08 04:29:24 +00:00
|
|
|
# Set object_post_as_copy = false to turn on fast posts where only the metadata
|
2011-06-08 04:19:34 +00:00
|
|
|
# changes are stored anew and the original data file is kept in place. This
|
|
|
|
# makes for quicker posts; but since the container metadata isn't updated in
|
|
|
|
# this mode, features like container sync won't be able to sync posts.
|
2011-06-08 04:29:24 +00:00
|
|
|
# object_post_as_copy = true
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2011-06-05 23:22:35 +00:00
|
|
|
# If set to 'true' authorized accounts that do not yet exist within the Swift
|
|
|
|
# cluster will be automatically created.
|
|
|
|
# account_autocreate = false
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-03-14 17:30:02 +00:00
|
|
|
# If set to a positive value, trying to create a container when the account
|
|
|
|
# already has at least this maximum containers will result in a 403 Forbidden.
|
|
|
|
# Note: This is a soft limit, meaning a user might exceed the cap for
|
|
|
|
# recheck_account_existence before the 403s kick in.
|
|
|
|
# max_containers_per_account = 0
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-03-14 17:30:02 +00:00
|
|
|
# This is a comma separated list of account hashes that ignore the
|
|
|
|
# max_containers_per_account cap.
|
|
|
|
# max_containers_whitelist =
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-08-20 22:51:46 -07:00
|
|
|
# Comma separated list of Host headers to which the proxy will deny requests.
|
2012-04-12 12:46:03 -07:00
|
|
|
# deny_host_headers =
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-08-20 22:51:46 -07:00
|
|
|
# Prefix used when automatically creating accounts.
|
2012-04-28 16:31:00 +10:00
|
|
|
# auto_create_account_prefix = .
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-08-20 22:51:46 -07:00
|
|
|
# Depth of the proxy put queue.
|
2012-04-28 16:31:00 +10:00
|
|
|
# put_queue_depth = 10
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
Local read affinity for GET/HEAD requests.
Now you can configure the proxy server to read from "local" primary
nodes first, where "local" is governed by the newly-introduced
"read_affinity" setting in the proxy config. This is desirable when
the network links between regions/zones are of varying capacities; in
such a case, it's a good idea to prefer fetching data from closer
backends.
The new setting looks like rN[zM]=P, where N is the region number, M
is the optional zone number, and P is the priority. Multiple values
can be specified by separating them with commas. The priority for
nodes that don't match anything is a very large number, so they'll
sort last.
This only affects the ordering of the primary nodes; it doesn't affect
handoffs at all. Further, while the primary nodes are reordered for
all requests, it only matters for GET/HEAD requests since handling the
other verbs ends up making concurrent requests to *all* the primary
nodes, so ordering is irrelevant.
Note that the default proxy config does not have this setting turned
on, so the default configuration's behavior is unaffected.
blueprint multi-region
Change-Id: Iea4cd367ed37fe5ee69b63234541d358d29963a4
2013-06-06 18:01:35 -07:00
|
|
|
# Storage nodes can be chosen at random (shuffle), by using timing
|
|
|
|
# measurements (timing), or by using an explicit match (affinity).
|
|
|
|
# Using timing measurements may allow for lower overall latency, while
|
|
|
|
# using affinity allows for finer control. In both the timing and
|
|
|
|
# affinity cases, equally-sorting nodes are still randomly chosen to
|
|
|
|
# spread load.
|
|
|
|
# The valid values for sorting_method are "affinity", "shuffle", and "timing".
|
2013-02-07 22:07:18 -08:00
|
|
|
# sorting_method = shuffle
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
Local read affinity for GET/HEAD requests.
Now you can configure the proxy server to read from "local" primary
nodes first, where "local" is governed by the newly-introduced
"read_affinity" setting in the proxy config. This is desirable when
the network links between regions/zones are of varying capacities; in
such a case, it's a good idea to prefer fetching data from closer
backends.
The new setting looks like rN[zM]=P, where N is the region number, M
is the optional zone number, and P is the priority. Multiple values
can be specified by separating them with commas. The priority for
nodes that don't match anything is a very large number, so they'll
sort last.
This only affects the ordering of the primary nodes; it doesn't affect
handoffs at all. Further, while the primary nodes are reordered for
all requests, it only matters for GET/HEAD requests since handling the
other verbs ends up making concurrent requests to *all* the primary
nodes, so ordering is irrelevant.
Note that the default proxy config does not have this setting turned
on, so the default configuration's behavior is unaffected.
blueprint multi-region
Change-Id: Iea4cd367ed37fe5ee69b63234541d358d29963a4
2013-06-06 18:01:35 -07:00
|
|
|
# If the "timing" sorting_method is used, the timings will only be valid for
|
2013-02-07 22:07:18 -08:00
|
|
|
# the number of seconds configured by timing_expiry.
|
|
|
|
# timing_expiry = 300
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-05-17 14:35:08 -07:00
|
|
|
# The maximum time (seconds) that a large object connection is allowed to last.
|
|
|
|
# max_large_object_get_time = 86400
|
|
|
|
#
|
2013-04-06 01:35:58 +00:00
|
|
|
# Set to the number of nodes to contact for a normal request. You can use
|
|
|
|
# '* replicas' at the end to have it use the number given times the number of
|
|
|
|
# replicas for the ring being used for the request.
|
|
|
|
# request_node_count = 2 * replicas
|
Local read affinity for GET/HEAD requests.
Now you can configure the proxy server to read from "local" primary
nodes first, where "local" is governed by the newly-introduced
"read_affinity" setting in the proxy config. This is desirable when
the network links between regions/zones are of varying capacities; in
such a case, it's a good idea to prefer fetching data from closer
backends.
The new setting looks like rN[zM]=P, where N is the region number, M
is the optional zone number, and P is the priority. Multiple values
can be specified by separating them with commas. The priority for
nodes that don't match anything is a very large number, so they'll
sort last.
This only affects the ordering of the primary nodes; it doesn't affect
handoffs at all. Further, while the primary nodes are reordered for
all requests, it only matters for GET/HEAD requests since handling the
other verbs ends up making concurrent requests to *all* the primary
nodes, so ordering is irrelevant.
Note that the default proxy config does not have this setting turned
on, so the default configuration's behavior is unaffected.
blueprint multi-region
Change-Id: Iea4cd367ed37fe5ee69b63234541d358d29963a4
2013-06-06 18:01:35 -07:00
|
|
|
#
|
|
|
|
# Which backend servers to prefer on reads. Format is r<N> for region
|
|
|
|
# N or r<N>z<M> for region N, zone M. The value after the equals is
|
|
|
|
# the priority; lower numbers are higher priority.
|
|
|
|
#
|
|
|
|
# Example: first read from region 1 zone 1, then region 1 zone 2, then
|
|
|
|
# anything in region 2, then everything else:
|
|
|
|
# read_affinity = r1z1=100, r1z2=200, r2=300
|
|
|
|
# Default is empty, meaning no preference.
|
|
|
|
# read_affinity =
|
2013-06-13 11:24:29 -07:00
|
|
|
#
|
|
|
|
# Which backend servers to prefer on writes. Format is r<N> for region
|
|
|
|
# N or r<N>z<M> for region N, zone M. If this is set, then when
|
|
|
|
# handling an object PUT request, some number (see setting
|
|
|
|
# write_affinity_node_count) of local backend servers will be tried
|
|
|
|
# before any nonlocal ones.
|
|
|
|
#
|
|
|
|
# Example: try to write to regions 1 and 2 before writing to any other
|
|
|
|
# nodes:
|
|
|
|
# write_affinity = r1, r2
|
|
|
|
# Default is empty, meaning no preference.
|
|
|
|
# write_affinity =
|
|
|
|
#
|
|
|
|
# The number of local (as governed by the write_affinity setting)
|
|
|
|
# nodes to attempt to contact first, before any non-local ones. You
|
|
|
|
# can use '* replicas' at the end to have it use the number given
|
|
|
|
# times the number of replicas for the ring being used for the
|
|
|
|
# request.
|
|
|
|
# write_affinity_node_count = 2 * replicas
|
2013-06-27 14:11:25 +00:00
|
|
|
#
|
|
|
|
# These are the headers whose values will only be shown to swift_owners. The
|
|
|
|
# exact definition of a swift_owner is up to the auth system in use, but
|
|
|
|
# usually indicates administrative responsibilities.
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-account-access-control
|
2013-06-13 11:24:29 -07:00
|
|
|
|
2010-07-14 15:11:08 -07:00
|
|
|
|
2011-05-26 02:24:12 +00:00
|
|
|
[filter:tempauth]
|
|
|
|
use = egg:swift#tempauth
|
2011-01-23 13:18:28 -08:00
|
|
|
# You can override the default log routing for this filter here:
|
2011-05-26 02:24:12 +00:00
|
|
|
# set log_name = tempauth
|
2011-01-23 13:18:28 -08:00
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# set log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2010-12-01 17:08:49 -08:00
|
|
|
# The reseller prefix will verify a token begins with this prefix before even
|
|
|
|
# attempting to validate it. Also, with authorization, only Swift storage
|
|
|
|
# accounts with this prefix will be authorized by this middleware. Useful if
|
|
|
|
# multiple auth systems are in use for one Swift cluster.
|
|
|
|
# reseller_prefix = AUTH
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2010-12-01 17:08:49 -08:00
|
|
|
# The auth prefix will cause requests beginning with this prefix to be routed
|
2011-05-26 01:19:03 +00:00
|
|
|
# to the auth subsystem, for granting tokens, etc.
|
2010-12-01 17:08:49 -08:00
|
|
|
# auth_prefix = /auth/
|
|
|
|
# token_life = 86400
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2011-12-21 13:54:07 +00:00
|
|
|
# This allows middleware higher in the WSGI pipeline to override auth
|
|
|
|
# processing, useful for middleware such as tempurl and formpost. If you know
|
|
|
|
# you're not going to use such middleware and you want a bit of extra security,
|
|
|
|
# you can set this to false.
|
|
|
|
# allow_overrides = true
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-11-10 16:39:25 +00:00
|
|
|
# This specifies what scheme to return with storage urls:
|
|
|
|
# http, https, or default (chooses based on what the server is running as)
|
|
|
|
# This can be useful with an SSL load balancer in front of a non-SSL server.
|
|
|
|
# storage_url_scheme = default
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2011-05-26 01:19:03 +00:00
|
|
|
# Lastly, you need to list all the accounts/users you want here. The format is:
|
|
|
|
# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
|
2012-10-01 21:43:34 -07:00
|
|
|
# or if you want underscores in <account> or <user>, you can base64 encode them
|
|
|
|
# (with no equal signs) and use this format:
|
|
|
|
# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
|
2011-05-26 01:19:03 +00:00
|
|
|
# There are special groups of:
|
|
|
|
# .reseller_admin = can do anything to any account for this auth
|
|
|
|
# .admin = can do anything within the account
|
|
|
|
# If neither of these groups are specified, the user can only access containers
|
|
|
|
# that have been explicitly allowed for them by a .admin or .reseller_admin.
|
|
|
|
# The trailing optional storage_url allows you to specify an alternate url to
|
|
|
|
# hand back to the user upon authentication. If not specified, this defaults to
|
2012-11-10 16:39:25 +00:00
|
|
|
# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
|
|
|
|
# to what the requester would need to use to reach this host.
|
2011-05-26 01:19:03 +00:00
|
|
|
# Here are example entries, required for running the tests:
|
|
|
|
user_admin_admin = admin .admin .reseller_admin
|
|
|
|
user_test_tester = testing .admin
|
|
|
|
user_test2_tester2 = testing2 .admin
|
|
|
|
user_test_tester3 = testing3
|
2010-12-01 17:08:49 -08:00
|
|
|
|
2012-06-20 16:37:30 +01:00
|
|
|
# To enable Keystone authentication you need to have the auth token
|
|
|
|
# middleware first to be configured. Here is an example below, please
|
|
|
|
# refer to the keystone's documentation for details about the
|
|
|
|
# different settings.
|
|
|
|
#
|
|
|
|
# You'll need to have as well the keystoneauth middleware enabled
|
|
|
|
# and have it in your main pipeline so instead of having tempauth in
|
2013-06-06 19:41:13 -06:00
|
|
|
# there you can change it to: authtoken keystoneauth
|
2012-06-20 16:37:30 +01:00
|
|
|
#
|
|
|
|
# [filter:authtoken]
|
2012-11-28 16:08:05 -05:00
|
|
|
# paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
2012-06-20 16:37:30 +01:00
|
|
|
# auth_host = keystonehost
|
|
|
|
# auth_port = 35357
|
|
|
|
# auth_protocol = http
|
|
|
|
# auth_uri = http://keystonehost:5000/
|
|
|
|
# admin_tenant_name = service
|
|
|
|
# admin_user = swift
|
|
|
|
# admin_password = password
|
|
|
|
# delay_auth_decision = 1
|
2013-02-21 22:58:27 +01:00
|
|
|
# cache = swift.cache
|
2013-12-02 13:40:01 +00:00
|
|
|
# include_service_catalog = False
|
2012-06-20 16:37:30 +01:00
|
|
|
#
|
|
|
|
# [filter:keystoneauth]
|
|
|
|
# use = egg:swift#keystoneauth
|
|
|
|
# Operator roles is the role which user would be allowed to manage a
|
|
|
|
# tenant and be able to create container or give ACL to others.
|
|
|
|
# operator_roles = admin, swiftoperator
|
2013-09-05 12:27:18 -07:00
|
|
|
# The reseller admin role has the ability to create and delete accounts
|
|
|
|
# reseller_admin_role = ResellerAdmin
|
2012-06-20 16:37:30 +01:00
|
|
|
|
2010-08-20 00:42:38 +00:00
|
|
|
[filter:healthcheck]
|
|
|
|
use = egg:swift#healthcheck
|
2012-12-03 16:05:44 -08:00
|
|
|
# An optional filesystem path, which if present, will cause the healthcheck
|
|
|
|
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
|
|
|
|
# This facility may be used to temporarily remove a Swift node from a load
|
|
|
|
# balancer pool during maintenance or upgrade (remove the file to allow the
|
|
|
|
# node back into the load balancer pool).
|
|
|
|
# disable_path =
|
2010-08-20 00:42:38 +00:00
|
|
|
|
|
|
|
[filter:cache]
|
2010-08-24 13:58:32 +00:00
|
|
|
use = egg:swift#memcache
|
2011-01-23 13:18:28 -08:00
|
|
|
# You can override the default log routing for this filter here:
|
2011-03-14 02:56:37 +00:00
|
|
|
# set log_name = cache
|
2011-01-23 13:18:28 -08:00
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# set log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-06-28 16:09:10 +02:00
|
|
|
# If not set here, the value for memcache_servers will be read from
|
2012-01-06 21:27:44 +00:00
|
|
|
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
|
|
|
|
# default to the value below. You can specify multiple servers separated with
|
|
|
|
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
|
2010-08-20 00:42:38 +00:00
|
|
|
# memcache_servers = 127.0.0.1:11211
|
Do not use pickle for serialization in memcache, but JSON
We don't want to use pickle as it can execute arbitrary code. JSON is
safer. However, note that it supports serialization for only some
specific subset of object types; this should be enough for what we need,
though.
To avoid issues on upgrades (unability to read pickled values, and cache
poisoning for old servers not understanding JSON), we add a
memcache_serialization_support configuration option, with the following
values:
0 = older, insecure pickle serialization
1 = json serialization but pickles can still be read (still insecure)
2 = json serialization only (secure and the default)
To avoid an instant full cache flush, existing installations should
upgrade with 0, then set to 1 and reload, then after some time (24
hours) set to 2 and reload. Support for 0 and 1 will be removed in
future versions.
Part of bug 1006414.
Change-Id: Id7d6d547b103b4f23ebf5be98b88f09ec6027ce4
2012-06-21 14:37:41 +02:00
|
|
|
#
|
|
|
|
# Sets how memcache values are serialized and deserialized:
|
|
|
|
# 0 = older, insecure pickle serialization
|
|
|
|
# 1 = json serialization but pickles can still be read (still insecure)
|
|
|
|
# 2 = json serialization only (secure and the default)
|
|
|
|
# If not set here, the value for memcache_serialization_support will be read
|
|
|
|
# from /etc/swift/memcache.conf (see memcache.conf-sample).
|
|
|
|
# To avoid an instant full cache flush, existing installations should
|
|
|
|
# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
|
|
|
|
# set to 2 and reload.
|
|
|
|
# In the future, the ability to use pickle serialization will be removed.
|
|
|
|
# memcache_serialization_support = 2
|
2013-11-19 22:55:09 -05:00
|
|
|
#
|
|
|
|
# Sets the maximum number of connections to each memcached server per worker
|
|
|
|
# memcache_max_connections = 2
|
2010-10-04 14:11:48 -07:00
|
|
|
|
2012-05-16 21:08:34 +00:00
|
|
|
[filter:ratelimit]
|
|
|
|
use = egg:swift#ratelimit
|
|
|
|
# You can override the default log routing for this filter here:
|
|
|
|
# set log_name = ratelimit
|
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# set log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-16 21:08:34 +00:00
|
|
|
# clock_accuracy should represent how accurate the proxy servers' system clocks
|
|
|
|
# are with each other. 1000 means that all the proxies' clock are accurate to
|
|
|
|
# each other within 1 millisecond. No ratelimit should be higher than the
|
|
|
|
# clock accuracy.
|
|
|
|
# clock_accuracy = 1000
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-16 21:08:34 +00:00
|
|
|
# max_sleep_time_seconds = 60
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-16 21:08:34 +00:00
|
|
|
# log_sleep_time_seconds of 0 means disabled
|
|
|
|
# log_sleep_time_seconds = 0
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-16 21:08:34 +00:00
|
|
|
# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
|
|
|
|
# rate_buffer_seconds = 5
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-16 21:08:34 +00:00
|
|
|
# account_ratelimit of 0 means disabled
|
|
|
|
# account_ratelimit = 0
|
|
|
|
|
|
|
|
# these are comma separated lists of account names
|
|
|
|
# account_whitelist = a,b
|
|
|
|
# account_blacklist = c,d
|
|
|
|
|
|
|
|
# with container_ratelimit_x = r
|
2013-08-14 12:40:25 +00:00
|
|
|
# for containers of size x limit write requests per second to r. The container
|
2012-05-16 21:08:34 +00:00
|
|
|
# rate will be linearly interpolated from the values given. With the values
|
|
|
|
# below, a container of size 5 will get a rate of 75.
|
|
|
|
# container_ratelimit_0 = 100
|
|
|
|
# container_ratelimit_10 = 50
|
|
|
|
# container_ratelimit_50 = 20
|
|
|
|
|
2013-08-14 12:40:25 +00:00
|
|
|
# Similarly to the above container-level write limits, the following will limit
|
|
|
|
# container GET (listing) requests.
|
|
|
|
# container_listing_ratelimit_0 = 100
|
|
|
|
# container_listing_ratelimit_10 = 50
|
|
|
|
# container_listing_ratelimit_50 = 20
|
|
|
|
|
2012-05-17 10:43:44 -05:00
|
|
|
[filter:domain_remap]
|
|
|
|
use = egg:swift#domain_remap
|
|
|
|
# You can override the default log routing for this filter here:
|
|
|
|
# set log_name = domain_remap
|
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# set log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-17 10:43:44 -05:00
|
|
|
# storage_domain = example.com
|
|
|
|
# path_root = v1
|
|
|
|
# reseller_prefixes = AUTH
|
|
|
|
|
2010-11-03 13:17:59 -05:00
|
|
|
[filter:catch_errors]
|
2010-11-03 13:04:04 -05:00
|
|
|
use = egg:swift#catch_errors
|
2011-01-23 13:18:28 -08:00
|
|
|
# You can override the default log routing for this filter here:
|
2011-03-14 02:56:37 +00:00
|
|
|
# set log_name = catch_errors
|
2011-01-23 13:18:28 -08:00
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# set log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2010-11-03 13:04:04 -05:00
|
|
|
|
2012-05-16 21:08:34 +00:00
|
|
|
[filter:cname_lookup]
|
|
|
|
# Note: this middleware requires python-dnspython
|
|
|
|
use = egg:swift#cname_lookup
|
|
|
|
# You can override the default log routing for this filter here:
|
|
|
|
# set log_name = cname_lookup
|
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
2013-07-21 12:18:24 +08:00
|
|
|
# set log_headers = false
|
2012-05-17 15:46:38 -07:00
|
|
|
# set log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2012-05-16 21:08:34 +00:00
|
|
|
# storage_domain = example.com
|
|
|
|
# lookup_depth = 1
|
|
|
|
|
|
|
|
# Note: Put staticweb just after your auth filter(s) in the pipeline
|
|
|
|
[filter:staticweb]
|
|
|
|
use = egg:swift#staticweb
|
|
|
|
|
|
|
|
# Note: Put tempurl just before your auth filter(s) in the pipeline
|
|
|
|
[filter:tempurl]
|
|
|
|
use = egg:swift#tempurl
|
2013-04-04 16:44:22 +00:00
|
|
|
# The methods allowed with Temp URLs.
|
|
|
|
# methods = GET HEAD PUT
|
2012-05-16 21:08:34 +00:00
|
|
|
#
|
|
|
|
# The headers to remove from incoming requests. Simply a whitespace delimited
|
|
|
|
# list of header names and names can optionally end with '*' to indicate a
|
|
|
|
# prefix match. incoming_allow_headers is a list of exceptions to these
|
|
|
|
# removals.
|
|
|
|
# incoming_remove_headers = x-timestamp
|
|
|
|
#
|
|
|
|
# The headers allowed as exceptions to incoming_remove_headers. Simply a
|
|
|
|
# whitespace delimited list of header names and names can optionally end with
|
|
|
|
# '*' to indicate a prefix match.
|
|
|
|
# incoming_allow_headers =
|
|
|
|
#
|
|
|
|
# The headers to remove from outgoing responses. Simply a whitespace delimited
|
|
|
|
# list of header names and names can optionally end with '*' to indicate a
|
|
|
|
# prefix match. outgoing_allow_headers is a list of exceptions to these
|
|
|
|
# removals.
|
|
|
|
# outgoing_remove_headers = x-object-meta-*
|
|
|
|
#
|
|
|
|
# The headers allowed as exceptions to outgoing_remove_headers. Simply a
|
|
|
|
# whitespace delimited list of header names and names can optionally end with
|
|
|
|
# '*' to indicate a prefix match.
|
|
|
|
# outgoing_allow_headers = x-object-meta-public-*
|
|
|
|
|
|
|
|
# Note: Put formpost just before your auth filter(s) in the pipeline
|
|
|
|
[filter:formpost]
|
|
|
|
use = egg:swift#formpost
|
|
|
|
|
2012-03-02 11:23:17 +00:00
|
|
|
# Note: Just needs to be placed before the proxy-server in the pipeline.
|
|
|
|
[filter:name_check]
|
|
|
|
use = egg:swift#name_check
|
|
|
|
# forbidden_chars = '"`<>
|
|
|
|
# maximum_length = 255
|
2012-07-05 15:43:14 +02:00
|
|
|
# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
|
2012-05-24 21:15:51 -07:00
|
|
|
|
2013-02-01 22:50:21 +04:00
|
|
|
[filter:list-endpoints]
|
|
|
|
use = egg:swift#list_endpoints
|
|
|
|
# list_endpoints_path = /endpoints/
|
|
|
|
|
2012-05-24 21:15:51 -07:00
|
|
|
[filter:proxy-logging]
|
|
|
|
use = egg:swift#proxy_logging
|
2012-12-03 16:05:44 -08:00
|
|
|
# If not set, logging directives from [DEFAULT] without "access_" will be used
|
Upating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# access_log_name = swift
|
|
|
|
# access_log_facility = LOG_LOCAL0
|
|
|
|
# access_log_level = INFO
|
|
|
|
# access_log_address = /dev/log
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
Upating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# If set, access_log_udp_host will override access_log_address
|
|
|
|
# access_log_udp_host =
|
|
|
|
# access_log_udp_port = 514
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
Upating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# You can use log_statsd_* from [DEFAULT] or override them here:
|
|
|
|
# access_log_statsd_host = localhost
|
|
|
|
# access_log_statsd_port = 8125
|
2013-01-19 15:25:27 -08:00
|
|
|
# access_log_statsd_default_sample_rate = 1.0
|
|
|
|
# access_log_statsd_sample_rate_factor = 1.0
|
Upating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# access_log_statsd_metric_prefix =
|
2013-07-21 12:18:24 +08:00
|
|
|
# access_log_headers = false
|
2013-06-06 15:35:19 -07:00
|
|
|
#
|
2013-07-23 15:10:09 +01:00
|
|
|
# By default, the X-Auth-Token is logged. To obscure the value,
|
|
|
|
# set reveal_sensitive_prefix to the number of characters to log.
|
|
|
|
# For example, if set to 12, only the first 12 characters of the
|
|
|
|
# token appear in the log. An unauthorized access of the log file
|
|
|
|
# won't allow unauthorized usage of the token. However, the first
|
|
|
|
# 12 or so characters are unique enough that you can trace/debug
|
|
|
|
# token usage. Set to 0 to suppress the token completely (replaced
|
|
|
|
# by '...' in the log).
|
|
|
|
# Note: reveal_sensitive_prefix will not affect the value
|
|
|
|
# logged with access_log_headers=true.
|
|
|
|
# reveal_sensitive_prefix = 8192
|
|
|
|
#
|
Upating proxy-server StatsD logging.
Removed many StatsD logging calls in proxy-server and added
swift-informant-style catch-all logging in the proxy-logger middleware.
Many errors previously rolled into the "proxy-server.<type>.errors"
counter will now appear broken down by response code and with timing
data at: "proxy-server.<type>.<verb>.<status>.timing". Also, bytes
transferred (sum of in + out) will be at:
"proxy-server.<type>.<verb>.<status>.xfer". The proxy-logging
middleware can get its StatsD config from standard vars in [DEFAULT] or
from access_log_statsd_* config vars in its config section.
Similarly to Swift Informant, request methods ("verbs") are filtered
using the new proxy-logging config var, "log_statsd_valid_http_methods"
which defaults to GET, HEAD, POST, PUT, DELETE, and COPY. Requests with
methods not in this list use "BAD_METHOD" for <verb> in the metric name.
To avoid user error, access_log_statsd_valid_http_methods is also
accepted.
Previously, proxy-server metrics used "Account", "Container", and
"Object" for the <type>, but these are now all lowercase.
Updated the admin guide's StatsD docs to reflect the above changes and
also include the "proxy-server.<type>.handoff_count" and
"proxy-server.<type>.handoff_all_count" metrics.
The proxy server now saves off the original req.method and proxy_logging
will use this if it can (both for request logging and as the "<verb>" in
the statsd timing metric). This fixes bug 1025433.
Removed some stale access_log_* related code in proxy/server.py. Also
removed the BaseApplication/Application distinction as it's no longer
necessary.
Fixed up the sample config files a bit (logging lines, mostly).
Fixed typo in SAIO development guide.
Got proxy_logging.py test coverage to 100%.
Fixed proxy_logging.py for PEP8 v1.3.2.
Enhanced test.unit.FakeLogger to track more calls to enable testing
StatsD metric calls.
Change-Id: I45d94cb76450be96d66fcfab56359bdfdc3a2576
2012-08-19 17:44:43 -07:00
|
|
|
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
|
|
|
|
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
|
2012-11-06 15:13:01 -08:00
|
|
|
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
|
2013-03-25 11:27:36 -05:00
|
|
|
#
|
2013-02-06 10:57:17 -08:00
|
|
|
# Note: The double proxy-logging in the pipeline is not a mistake. The
|
|
|
|
# left-most proxy-logging is there to log requests that were handled in
|
|
|
|
# middleware and never made it through to the right-most middleware (and
|
|
|
|
# proxy server). Double logging is prevented for normal requests. See
|
|
|
|
# proxy-logging docs.
|
2013-01-24 12:34:56 -08:00
|
|
|
|
|
|
|
# Note: Put before both ratelimit and auth in the pipeline.
|
|
|
|
[filter:bulk]
|
|
|
|
use = egg:swift#bulk
|
|
|
|
# max_containers_per_extraction = 10000
|
2013-04-30 14:45:46 -07:00
|
|
|
# max_failed_extractions = 1000
|
|
|
|
# max_deletes_per_request = 10000
|
2013-11-27 18:24:17 -08:00
|
|
|
# max_failed_deletes = 1000
|
2014-01-17 09:56:52 -08:00
|
|
|
|
|
|
|
# In order to keep a connection active during a potentially long bulk request,
|
|
|
|
# Swift may return whitespace prepended to the actual response body. This
|
|
|
|
# whitespace will be yielded no more than every yield_frequency seconds.
|
|
|
|
# yield_frequency = 10
|
2013-01-31 21:53:47 -08:00
|
|
|
|
2013-12-05 17:19:50 -03:00
|
|
|
# Note: The following parameter is used during a bulk delete of objects and
|
|
|
|
# their container. This would frequently fail because it is very likely
|
|
|
|
# that all replicated objects have not been deleted by the time the middleware got a
|
|
|
|
# successful response. The number of retries can be configured, and the
|
|
|
|
# number of seconds to wait between each retry will be 1.5**retry
|
|
|
|
|
|
|
|
# delete_container_retry_count = 0
|
|
|
|
|
2013-01-31 21:53:47 -08:00
|
|
|
# Note: Put after auth in the pipeline.
|
|
|
|
[filter:container-quotas]
|
|
|
|
use = egg:swift#container_quotas
|
2013-02-13 12:31:55 -08:00
|
|
|
|
|
|
|
# Note: Put before both ratelimit and auth in the pipeline.
|
|
|
|
[filter:slo]
|
|
|
|
use = egg:swift#slo
|
|
|
|
# max_manifest_segments = 1000
|
|
|
|
# max_manifest_size = 2097152
|
|
|
|
# min_segment_size = 1048576
|
Move all SLO functionality to middleware
This way, with zero additional effort, SLO will support enhancements
to object storage and retrieval, such as:
* automatic resume of GETs on broken connection (today)
* storage policies (in the near future)
* erasure-coded object segments (in the far future)
This also lets SLOs work with other sorts of hypothetical third-party
middleware, for example object compression or encryption.
Getting COPY to work here is sort of a hack; the proxy's object
controller now checks for "swift.copy_response_hook" in the request's
environment and feeds the GET response (the source of the new object's
data) through it. This lets a COPY of a SLO manifest actually combine
the segments instead of merely copying the manifest document.
Updated ObjectController to expect a response's app_iter to be an
iterable, not just an iterator. (PEP 333 says "When called by the
server, the application object must return an iterable yielding zero
or more strings." ObjectController was just being too strict.) This
way, SLO can re-use the same response-generation logic for GET and
COPY requests.
Added a (sort of hokey) mechanism to allow middlewares to close
incompletely-consumed app iterators without triggering a warning. SLO
does this when it realizes it's performed a ranged GET on a manifest;
it closes the iterable, removes the range, and retries the
request. Without this change, the proxy logs would get 'Client
disconnected on read' in them.
DocImpact
blueprint multi-ring-large-objects
Change-Id: Ic11662eb5c7176fbf422a6fc87a569928d6f85a1
2013-11-13 12:06:55 -08:00
|
|
|
# Start rate-limiting SLO segment serving after the Nth segment of a
|
|
|
|
# segmented object.
|
|
|
|
# rate_limit_after_segment = 10
|
|
|
|
#
|
|
|
|
# Once segment rate-limiting kicks in for an object, limit segments served
|
|
|
|
# to N per second. 0 means no rate-limiting.
|
|
|
|
# rate_limit_segments_per_sec = 0
|
|
|
|
#
|
2014-01-13 18:57:17 -08:00
|
|
|
# Time limit on GET requests (seconds)
|
|
|
|
# max_get_time = 86400
|
2013-03-04 17:53:44 +01:00
|
|
|
|
2013-11-21 17:31:16 -08:00
|
|
|
# Note: Put before both ratelimit and auth in the pipeline, but after
|
|
|
|
# gatekeeper, catch_errors, and proxy_logging (the first instance).
|
|
|
|
# If you don't put it in the pipeline, it will be inserted for you.
|
|
|
|
[filter:dlo]
|
|
|
|
use = egg:swift#dlo
|
|
|
|
# Start rate-limiting DLO segment serving after the Nth segment of a
|
|
|
|
# segmented object.
|
|
|
|
# rate_limit_after_segment = 10
|
|
|
|
#
|
|
|
|
# Once segment rate-limiting kicks in for an object, limit segments served
|
|
|
|
# to N per second. 0 means no rate-limiting.
|
|
|
|
# rate_limit_segments_per_sec = 1
|
|
|
|
#
|
|
|
|
# Time limit on GET requests (seconds)
|
|
|
|
# max_get_time = 86400
|
|
|
|
|
2013-03-04 17:53:44 +01:00
|
|
|
[filter:account-quotas]
|
|
|
|
use = egg:swift#account_quotas
|
2013-12-03 22:02:39 +00:00
|
|
|
|
|
|
|
[filter:gatekeeper]
|
|
|
|
use = egg:swift#gatekeeper
|
|
|
|
# You can override the default log routing for this filter here:
|
|
|
|
# set log_name = gatekeeper
|
|
|
|
# set log_facility = LOG_LOCAL0
|
|
|
|
# set log_level = INFO
|
|
|
|
# set log_headers = false
|
|
|
|
# set log_address = /dev/log
|
2013-12-08 09:13:59 +00:00
|
|
|
|
|
|
|
[filter:container_sync]
|
|
|
|
use = egg:swift#container_sync
|
|
|
|
# Set this to false if you want to disallow any full url values to be set for
|
|
|
|
# any new X-Container-Sync-To headers. This will keep any new full urls from
|
|
|
|
# coming in, but won't change any existing values already in the cluster.
|
|
|
|
# Updating those will have to be done manually, as knowing what the true realm
|
|
|
|
# endpoint should be cannot always be guessed.
|
|
|
|
# allow_full_urls = true
|