Merge "Add the max_clients parameter to bound clients"

This commit is contained in:
Jenkins 2013-04-30 05:57:10 +00:00 committed by Gerrit Code Review
commit 52a6595033
10 changed files with 122 additions and 26 deletions

View File

@ -114,6 +114,12 @@ Logging level. The default is INFO.
Enables request logging. The default is True. Enables request logging. The default is True.
.IP "\fB set log_address\fR .IP "\fB set log_address\fR
Logging address. The default is /dev/log. Logging address. The default is /dev/log.
.IP \fBmax_clients\fR
Maximum number of clients one worker can process simultaneously (it will
actually accept(2) N + 1). Setting this to one (1) will only handle one request
at a time, without accepting another request concurrently. By increasing the
number of workers to a much higher value, one can keep slow file system
operations in one request from negatively impacting other requests. The default is 1024.
.RE .RE
.PD .PD
@ -253,5 +259,3 @@ and
.SH "SEE ALSO" .SH "SEE ALSO"
.BR swift-account-server(1), .BR swift-account-server(1),

View File

@ -120,6 +120,12 @@ Logging address. The default is /dev/log.
Request timeout to external services. The default is 3 seconds. Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR .IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds. Connection timeout to external services. The default is 0.5 seconds.
.IP \fBmax_clients\fR
Maximum number of clients one worker can process simultaneously (it will
actually accept(2) N + 1). Setting this to one (1) will only handle one request
at a time, without accepting another request concurrently. By increasing the
number of workers to a much higher value, one can keep slow file system
operations in one request from negatively impacting other requests. The default is 1024.
.RE .RE
.PD .PD
@ -278,5 +284,3 @@ and
.SH "SEE ALSO" .SH "SEE ALSO"
.BR swift-container-server(1), .BR swift-container-server(1),

View File

@ -120,6 +120,12 @@ Logging address. The default is /dev/log.
Request timeout to external services. The default is 3 seconds. Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR .IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds. Connection timeout to external services. The default is 0.5 seconds.
.IP \fBmax_clients\fR
Maximum number of clients one worker can process simultaneously (it will
actually accept(2) N + 1). Setting this to one (1) will only handle one request
at a time, without accepting another request concurrently. By increasing the
number of workers to a much higher value, one can keep slow file system
operations in one request from negatively impacting other requests. The default is 1024.
.RE .RE
.PD .PD
@ -270,5 +276,3 @@ and
.SH "SEE ALSO" .SH "SEE ALSO"
.BR swift-object-server(1), .BR swift-object-server(1),

View File

@ -537,6 +537,12 @@ object. The default is 10 segments.
.IP \fBrate_limit_segments_per_sec\fR .IP \fBrate_limit_segments_per_sec\fR
Once segment rate-limiting kicks in for an object, limit segments served to N Once segment rate-limiting kicks in for an object, limit segments served to N
per second. The default is 1. per second. The default is 1.
.IP \fBmax_clients\fR
Maximum number of clients one worker can process simultaneously (it will
actually accept(2) N + 1). Setting this to one (1) will only handle one request
at a time, without accepting another request concurrently. By increasing the
number of workers to a much higher value, one can keep slow file system
operations in one request from negatively impacting other requests. The default is 1024.
.RE .RE
.PD .PD

View File

@ -241,6 +241,16 @@ bind_ip 0.0.0.0 IP Address for server to bind to
bind_port 6000 Port for server to bind to bind_port 6000 Port for server to bind to
bind_timeout 30 Seconds to attempt bind before giving up bind_timeout 30 Seconds to attempt bind before giving up
workers 1 Number of workers to fork workers 1 Number of workers to fork
max_clients 1024 Maximum number of clients one worker can
process simultaneously (it will actually
accept(2) N + 1). Setting this to one (1)
will only handle one request at a time,
without accepting another request
concurrently. By increasing the number of
workers to a much higher value, one can
keep slow file system
operations in one request from negatively
impacting other requests.
disable_fallocate false Disable "fast fail" fallocate checks if the disable_fallocate false Disable "fast fail" fallocate checks if the
underlying filesystem does not support it. underlying filesystem does not support it.
log_custom_handlers None Comma-separated list of functions to call log_custom_handlers None Comma-separated list of functions to call
@ -358,6 +368,16 @@ bind_ip 0.0.0.0 IP Address for server to bind to
bind_port 6001 Port for server to bind to bind_port 6001 Port for server to bind to
bind_timeout 30 Seconds to attempt bind before giving up bind_timeout 30 Seconds to attempt bind before giving up
workers 1 Number of workers to fork workers 1 Number of workers to fork
max_clients 1024 Maximum number of clients one worker can
process simultaneously (it will actually
accept(2) N + 1). Setting this to one (1)
will only handle one request at a time,
without accepting another request
concurrently. By increasing the number of
workers to a much higher value, one can
keep slow file system
operations in one request from negatively
impacting other requests.
user swift User to run as user swift User to run as
disable_fallocate false Disable "fast fail" fallocate checks if the disable_fallocate false Disable "fast fail" fallocate checks if the
underlying filesystem does not support it. underlying filesystem does not support it.
@ -468,6 +488,16 @@ bind_ip 0.0.0.0 IP Address for server to bind to
bind_port 6002 Port for server to bind to bind_port 6002 Port for server to bind to
bind_timeout 30 Seconds to attempt bind before giving up bind_timeout 30 Seconds to attempt bind before giving up
workers 1 Number of workers to fork workers 1 Number of workers to fork
max_clients 1024 Maximum number of clients one worker can
process simultaneously (it will actually
accept(2) N + 1). Setting this to one (1)
will only handle one request at a time,
without accepting another request
concurrently. By increasing the number of
workers to a much higher value, one can
keep slow file system
operations in one request from negatively
impacting other requests.
user swift User to run as user swift User to run as
db_preallocation off If you don't mind the extra disk space usage in db_preallocation off If you don't mind the extra disk space usage in
overhead, you can turn this on to preallocate overhead, you can turn this on to preallocate
@ -572,6 +602,21 @@ bind_timeout 30 Seconds to attempt bind before
giving up giving up
swift_dir /etc/swift Swift configuration directory swift_dir /etc/swift Swift configuration directory
workers 1 Number of workers to fork workers 1 Number of workers to fork
max_clients 1024 Maximum number of clients one
worker can process
simultaneously (it will
actually accept(2) N +
1). Setting this to one (1)
will only handle one request at
a time, without accepting
another request
concurrently. By increasing the
number of workers to a much
higher value, one can keep
slow file system
operations in one request from
negatively impacting other
requests.
user swift User to run as user swift User to run as
cert_file Path to the ssl .crt. This cert_file Path to the ssl .crt. This
should be enabled for testing should be enabled for testing
@ -785,12 +830,12 @@ should also be monitored to ensure that the times do not vary too much.
General Service Tuning General Service Tuning
---------------------- ----------------------
Most services support either a worker or concurrency value in the settings. Most services support either a `worker` or `concurrency` value in the
This allows the services to make effective use of the cores available. A good settings. This allows the services to make effective use of the cores
starting point to set the concurrency level for the proxy and storage services available. A good starting point to set the concurrency level for the proxy
to 2 times the number of cores available. If more than one service is and storage services to 2 times the number of cores available. If more than
sharing a server, then some experimentation may be needed to find the best one service is sharing a server, then some experimentation may be needed to
balance. find the best balance.
At Rackspace, our Proxy servers have dual quad core processors, giving us 8 At Rackspace, our Proxy servers have dual quad core processors, giving us 8
cores. Our testing has shown 16 workers to be a pretty good balance when cores. Our testing has shown 16 workers to be a pretty good balance when
@ -798,9 +843,21 @@ saturating a 10g network and gives good CPU utilization.
Our Storage servers all run together on the same servers. These servers have Our Storage servers all run together on the same servers. These servers have
dual quad core processors, for 8 cores total. We run the Account, Container, dual quad core processors, for 8 cores total. We run the Account, Container,
and Object servers with 8 workers each. Most of the background jobs are run and Object servers with 8 workers each. Most of the background jobs are run at
at a concurrency of 1, with the exception of the replicators which are run at a concurrency of 1, with the exception of the replicators which are run at a
a concurrency of 2. concurrency of 2.
The `max_clients` parameter can be used to adjust the number of client
requests an individual worker accepts for processing. The fewer requests being
processed at one time, the less likely a request that consumes the worker's
CPU time, or blocks in the OS, will negatively impact other requests. The more
requests being processed at one time, the more likely one worker can utilize
network and disk capacity.
On systems that have more cores, and more memory, where one can afford to run
more workers, raising the number of workers and lowering the maximum number of
clients serviced per worker can lessen the impact of CPU intensive or stalled
requests.
The above configuration setting should be taken as suggestions and testing The above configuration setting should be taken as suggestions and testing
of configuration settings should be done to ensure best utilization of CPU, of configuration settings should be done to ensure best utilization of CPU,

View File

@ -47,6 +47,7 @@ use = egg:swift#account
# set log_requests = True # set log_requests = True
# set log_address = /dev/log # set log_address = /dev/log
# auto_create_account_prefix = . # auto_create_account_prefix = .
# max_clients = 1024
[filter:healthcheck] [filter:healthcheck]
use = egg:swift#healthcheck use = egg:swift#healthcheck

View File

@ -53,6 +53,7 @@ use = egg:swift#container
# conn_timeout = 0.5 # conn_timeout = 0.5
# allow_versions = False # allow_versions = False
# auto_create_account_prefix = . # auto_create_account_prefix = .
# max_clients = 1024
[filter:healthcheck] [filter:healthcheck]
use = egg:swift#healthcheck use = egg:swift#healthcheck

View File

@ -62,6 +62,7 @@ use = egg:swift#object
# Content-Type, etag, Content-Length, or deleted # Content-Type, etag, Content-Length, or deleted
# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object # allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
# auto_create_account_prefix = . # auto_create_account_prefix = .
# max_clients = 1024
[filter:healthcheck] [filter:healthcheck]
use = egg:swift#healthcheck use = egg:swift#healthcheck

View File

@ -37,6 +37,7 @@
# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar) # Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
# cors_allow_origin = # cors_allow_origin =
# eventlet_debug = false # eventlet_debug = false
# max_clients = 1024
[pipeline:main] [pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache slo ratelimit tempauth container-quotas account-quotas proxy-logging proxy-server pipeline = catch_errors healthcheck proxy-logging cache slo ratelimit tempauth container-quotas account-quotas proxy-logging proxy-server

View File

@ -105,6 +105,22 @@ def get_socket(conf, default_port=8080):
return sock return sock
class RestrictedGreenPool(GreenPool):
    """
    A GreenPool variant that can serialize request handling.

    Behaves exactly like GreenPool except when constructed with a size of
    one (1): in that case every call to spawn_n() blocks in waitall() until
    the spawned coroutine has finished, so the caller cannot go back to
    doing other work (such as calling accept()) while a request is being
    processed.
    """

    def __init__(self, size=1024):
        # Serialize only when the pool holds exactly one green thread;
        # any larger size keeps stock GreenPool behavior.
        self._rgp_do_wait = (size == 1)
        super(RestrictedGreenPool, self).__init__(size=size)

    def spawn_n(self, *args, **kwargs):
        super(RestrictedGreenPool, self).spawn_n(*args, **kwargs)
        if not self._rgp_do_wait:
            return
        # Block until the single in-flight request completes.
        self.waitall()
# TODO: pull pieces of this out to test # TODO: pull pieces of this out to test
def run_wsgi(conf_file, app_section, *args, **kwargs): def run_wsgi(conf_file, app_section, *args, **kwargs):
""" """
@ -132,7 +148,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
# redirect errors to logger and close stdio # redirect errors to logger and close stdio
capture_stdio(logger) capture_stdio(logger)
def run_server(): def run_server(max_clients):
wsgi.HttpProtocol.default_request_version = "HTTP/1.0" wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software. # Turn off logging requests by the underlying WSGI software.
wsgi.HttpProtocol.log_request = lambda *a: None wsgi.HttpProtocol.log_request = lambda *a: None
@ -147,7 +163,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
eventlet.debug.hub_exceptions(eventlet_debug) eventlet.debug.hub_exceptions(eventlet_debug)
app = loadapp('config:%s' % conf_file, app = loadapp('config:%s' % conf_file,
global_conf={'log_name': log_name}) global_conf={'log_name': log_name})
pool = GreenPool(size=1024) pool = RestrictedGreenPool(size=max_clients)
try: try:
wsgi.server(sock, app, NullLogger(), custom_pool=pool) wsgi.server(sock, app, NullLogger(), custom_pool=pool)
except socket.error, err: except socket.error, err:
@ -155,10 +171,11 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
raise raise
pool.waitall() pool.waitall()
max_clients = int(conf.get('max_clients', '1024'))
worker_count = int(conf.get('workers', '1')) worker_count = int(conf.get('workers', '1'))
# Useful for profiling [no forks]. # Useful for profiling [no forks].
if worker_count == 0: if worker_count == 0:
run_server() run_server(max_clients)
return return
def kill_children(*args): def kill_children(*args):
@ -184,7 +201,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
if pid == 0: if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
run_server() run_server(max_clients)
logger.notice('Child %d exiting normally' % os.getpid()) logger.notice('Child %d exiting normally' % os.getpid())
return return
else: else: