[DEFAULT]
# bind_ip = 0.0.0.0
bind_port = 6202
# bind_timeout = 30
# backlog = 4096
# user = swift
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# disable_fallocate = false
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
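# For example, on a node dedicated to the account server you might pin the
# worker count to the machine's core count instead of relying on auto
# (the value below is purely illustrative):
# workers = 8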
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
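# For example, to truncate unusually long log lines at 2048 characters
# (illustrative value):
# log_max_line_length = 2048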
#
# Comma-separated list of functions to call to set up custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
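# For example, to install a handler provided by your own package (the module
# and function names here are hypothetical):
# log_custom_handlers = mypackage.logging.setup_syslog_handler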
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
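# As an illustration, a minimal StatsD setup sending metrics to a local statsd
# daemon and prefixing them with this node's name might look like the
# following (host and prefix are examples only):
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_metric_prefix = account1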
#
# If you don't mind the extra disk space usage in overhead, you can turn this
# on to preallocate disk space with SQLite databases to decrease fragmentation.
# db_preallocation = off
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes or percentage of disk
# space you'd like fallocate to reserve, whether there is space for the given
# file size or not. Percentage will be used if the value ends with a '%'.
# fallocate_reserve = 1%
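# Either form is accepted; for example, to reserve roughly 10 GiB as an
# absolute byte count instead of a percentage (illustrative value):
# fallocate_reserve = 10737418240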
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =
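# For example, to run this server at slightly reduced CPU and I/O priority
# (values are illustrative, not recommendations):
# nice_priority = 10
# ionice_class = IOPRIO_CLASS_BE
# ionice_priority = 4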

[pipeline:main]
pipeline = healthcheck recon account-server

[app:account-server]
use = egg:swift#account
# You can override the default log routing for this app here:
# set log_name = account-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# auto_create_account_prefix = .
#
# Configure which request verbs this server handles:
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server". Default is empty.
# replication_server = false
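# For example, a deployment with a separate replication network might run a
# second account-server instance, bound to the replication IP, whose config
# handles only replication verbs (this line is illustrative):
# replication_server = true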
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path which, if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =
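# For example, to let an operator take this node out of the load balancer by
# touching a file (the path is illustrative):
# disable_path = /etc/swift/healthcheck_disable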

[filter:recon]
use = egg:swift#recon
# recon_cache_path = /var/cache/swift

[account-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# Maximum number of database rows that will be sync'd in a single HTTP
# replication request. Databases with less than or equal to this number of
# differing rows will always be sync'd using an HTTP replication request rather
# than using rsync.
# per_diff = 1000
#
# Maximum number of HTTP replication requests attempted on each replication
# pass for any one database. This caps how long the replicator will spend
# trying to sync a given database per pass so the other databases don't get
# starved.
# max_diffs = 100
#
# Number of replication workers to spawn.
# concurrency = 8
#
# Time in seconds to wait between replication passes
# interval = 30
# run_pause is deprecated, use interval instead
# run_pause = 30
#
# node_timeout = 10
# conn_timeout = 0.5
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# Allow rsync to compress data which is transmitted to the destination node
# during sync. However, this applies only when the destination node is in
# a different region than the local one.
# rsync_compress = no
#
# Format of the rsync module where the replicator will send data. See
# etc/rsyncd.conf-sample for some usage examples.
# rsync_module = {replication_ip}::account
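# For example, to spread replication traffic across one rsync module per
# device, so per-module connection limits apply per disk (the module name is
# illustrative and must match your rsyncd.conf):
# rsync_module = {replication_ip}::account_{device}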
#
# recon_cache_path = /var/cache/swift
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =
#
# The handoffs_only mode option is for special-case emergency
# situations such as full disks in the cluster. This option SHOULD NOT
# BE ENABLED except in emergencies. When handoffs_only mode is enabled
# the replicator will *only* replicate from handoff nodes to primary
# nodes and will not sync primary nodes with other primary nodes.
#
# This has two main effects: first, the replicator becomes much more
# effective at removing misplaced databases, thereby freeing up disk
# space at a much faster pace than normal. Second, the replicator does
# not sync data between primary nodes, so out-of-sync account and
# container listings will not resolve while handoffs_only is enabled.
#
# This mode is intended to allow operators to temporarily sacrifice
# consistency in order to gain faster rebalancing, such as during a
# capacity addition with nearly-full disks. It is not intended for
# long-term use.
#
# handoffs_only = no

[account-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# Will audit each account at most once per interval
# interval = 1800
#
# accounts_per_second = 200
# recon_cache_path = /var/cache/swift
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =

[account-reaper]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# concurrency = 25
# interval = 3600
# node_timeout = 10
# conn_timeout = 0.5
#
# Normally, the reaper begins deleting account information for deleted accounts
# immediately; however, you can set this to delay its work. The value is in
# seconds; 2592000 = 30 days for example. The sum of this value and the
# container-updater interval should be less than the account-replicator
# reclaim_age. This ensures that once the account-reaper has deleted a
# container there is sufficient time for the container-updater to report to the
# account before the account DB is removed.
# delay_reaping = 0
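# For example, with the default account-replicator reclaim_age of 604800
# seconds (7 days) and a container-updater interval of 300 seconds, any
# delay_reaping below 604500 seconds satisfies that constraint
# (illustrative value):
# delay_reaping = 86400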
#
# If the account fails to be reaped due to a persistent error, the
# account reaper will log a message such as:
# Account <name> has not been reaped since <date>
# You can search logs for this message if space is not being reclaimed
# after you delete account(s).
# Default is 2592000 seconds (30 days). This is in addition to any time
# requested by delay_reaping.
# reap_warn_after = 2592000
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =

# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the
# Python standard profiler. Currently the supported values are 'cProfile' and
# 'eventlet.green.profile'.
# profile_module = eventlet.green.profile
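# For example, to use the stdlib profiler instead of the eventlet-aware one:
# profile_module = cProfile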
#
# This prefix will be combined with the process ID and a timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/account.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# The profile data will be dumped to local disk based on the above naming rule
# at this interval.
# dump_interval = 5.0
#
# Be careful: this option will make the profiler dump data into files named
# with a timestamp, which means many files will pile up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the WSGI server shuts down.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false