swift/etc/account-server.conf-sample
Peter Portante 2d42b37303 Add the max_clients parameter to bound clients
The new max_clients parameter gives full control over the maximum number
of client requests that will be handled by a given worker for any of the
proxy, account, container, or object servers.

Lowering the number of clients handled per worker and raising the number
of workers can lessen the impact that a CPU-intensive or blocking request
has on other requests served by the same worker.

If the maximum number of clients is set to one, then a given worker will
not perform another accept(2) call while it is processing a request,
giving other workers a chance to accept the incoming connection instead.
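
As a rough sketch of that trade-off (the values below are purely
illustrative, not recommendations), one might raise workers while lowering
the per-worker client cap in a server config such as this account-server
sample:

    [DEFAULT]
    # run more worker processes ...
    workers = 4

    [app:account-server]
    use = egg:swift#account
    # ... but let each worker accept fewer concurrent client requests
    max_clients = 256

With max_clients = 1, a worker accepts only a single request at a time, as
described above.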

DocImpact
Signed-off-by: Peter Portante <peter.portante@redhat.com>

Change-Id: Ic01430f7a6c5ff48d7aa349dc86a5f8ac463a420
2013-04-26 10:29:57 -04:00

[DEFAULT]
# bind_ip = 0.0.0.0
# bind_port = 6002
# bind_timeout = 30
# backlog = 4096
# workers = 1
# user = swift
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# disable_fallocate = false
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# Comma-separated list of functions to call to set up custom log handlers.
# Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
# If you don't mind the extra disk space overhead, you can turn this on to
# preallocate disk space for SQLite databases to decrease fragmentation.
# db_preallocation = off
# eventlet_debug = false
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0

[pipeline:main]
pipeline = healthcheck recon account-server

[app:account-server]
use = egg:swift#account
# You can override the default log routing for this app here:
# set log_name = account-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True
# set log_address = /dev/log
# auto_create_account_prefix = .
# max_clients = 1024

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path which, if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =

[filter:recon]
use = egg:swift#recon
# recon_cache_path = /var/cache/swift

[account-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# vm_test_mode = no
# per_diff = 1000
# max_diffs = 100
# concurrency = 8
# interval = 30
# How long without an error before a node's error count is reset. This is
# also how long a node stays suppressed after error suppression is triggered.
# error_suppression_interval = 60
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
# node_timeout = 10
# conn_timeout = 0.5
# The replicator also performs reclamation
# reclaim_age = 604800
# Time in seconds to wait between replication passes
# run_pause = 30
# recon_cache_path = /var/cache/swift

[account-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# Will audit each account at most once per interval
# interval = 1800
# accounts_per_second = 200
# recon_cache_path = /var/cache/swift

[account-reaper]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# concurrency = 25
# interval = 3600
# node_timeout = 10
# conn_timeout = 0.5
# Normally, the reaper begins deleting account information for deleted accounts
# immediately; however, you can set this value to delay its work. The value is
# in seconds; for example, 2592000 = 30 days.
# delay_reaping = 0