[DEFAULT]
# bind_ip = 0.0.0.0
# bind_port = 80
# bind_timeout = 30
# backlog = 4096
# swift_dir = /etc/swift
# user = swift
#
# Use an integer to override the number of pre-forked processes that will
# accept connections. Should default to the number of effective cpu
# cores in the system. It's worth noting that individual workers will
# use many eventlet co-routines to service multiple concurrent requests.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# Set the following two lines to enable SSL. This is for testing only.
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
#
# expiring_objects_container_divisor = 86400
#
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = false
# log_address = /dev/log
#
# This optional suffix (default is empty) is appended to the swift transaction
# id, making it easy to tell which cluster a given X-Trans-Id belongs to.
# This is very useful when managing more than one swift cluster.
# trans_id_suffix =
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# Use a comma separated list of full urls (http://foo.bar:1234,https://foo.bar)
# cors_allow_origin =
#
# client_timeout = 60
# eventlet_debug = false

[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache bulk slo ratelimit tempauth container-quotas account-quotas proxy-logging proxy-server

[app:proxy-server]
use = egg:swift#proxy
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_address = /dev/log
#
# log_handoffs = true
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 8192
# client_chunk_size = 8192
# node_timeout = 10
# conn_timeout = 0.5
#
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# error_suppression_interval = 60
#
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
#
# If set to 'true' any authorized user may create and delete accounts; if
# 'false' no one, even authorized, can.
# allow_account_management = false
#
# Set object_post_as_copy = false to turn on fast posts where only the metadata
# changes are stored anew and the original data file is kept in place. This
# makes for quicker posts; but since the container metadata isn't updated in
# this mode, features like container sync won't be able to sync posts.
# object_post_as_copy = true
#
# If set to 'true' authorized accounts that do not yet exist within the Swift
# cluster will be automatically created.
# account_autocreate = false
#
# If set to a positive value, trying to create a container when the account
# already has at least that many containers will result in a 403 Forbidden.
# Note: This is a soft limit, meaning a user might exceed the cap for up to
# recheck_account_existence seconds before the 403s kick in.
# max_containers_per_account = 0
#
# This is a comma separated list of account hashes that ignore the
# max_containers_per_account cap.
# max_containers_whitelist =
#
# Comma separated list of Host headers to which the proxy will deny requests.
# deny_host_headers =
#
# Prefix used when automatically creating accounts.
# auto_create_account_prefix = .
#
# Depth of the proxy put queue.
# put_queue_depth = 10
#
# Start rate-limiting object segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second.
# rate_limit_segments_per_sec = 1
#
# Storage nodes can be chosen at random (shuffle), by using timing
# measurements (timing), or by using an explicit match (affinity).
# Using timing measurements may allow for lower overall latency, while
# using affinity allows for finer control. In both the timing and
# affinity cases, equally-sorting nodes are still randomly chosen to
# spread load.
# The valid values for sorting_method are "affinity", "shuffle", and "timing".
# sorting_method = shuffle
#
# If the "timing" sorting_method is used, the timings will only be valid for
# the number of seconds configured by timing_expiry.
# timing_expiry = 300
#
# If set to false, objects with the X-Static-Large-Object header set will be
# treated as regular objects on GETs, i.e. the manifest's own contents will
# be returned. Should be set to false if slo is not used in the pipeline.
# allow_static_large_object = true
#
# The maximum time (seconds) that a large object connection is allowed to last.
# max_large_object_get_time = 86400
#
# Set to the number of nodes to contact for a normal request. You can use
# '* replicas' at the end to have it use the number given times the number of
# replicas for the ring being used for the request.
# request_node_count = 2 * replicas
#
# Which backend servers to prefer on reads. Format is r<N> for region
# N or r<N>z<M> for region N, zone M. The value after the equals is
# the priority; lower numbers are higher priority.
#
# Example: first read from region 1 zone 1, then region 1 zone 2, then
# anything in region 2, then everything else:
# read_affinity = r1z1=100, r1z2=200, r2=300
# Default is empty, meaning no preference.
# read_affinity =
#
# Which backend servers to prefer on writes. Format is r<N> for region
# N or r<N>z<M> for region N, zone M. If this is set, then when
# handling an object PUT request, some number (see setting
# write_affinity_node_count) of local backend servers will be tried
# before any nonlocal ones.
#
# Example: try to write to regions 1 and 2 before writing to any other
# nodes:
# write_affinity = r1, r2
# Default is empty, meaning no preference.
# write_affinity =
#
# The number of local (as governed by the write_affinity setting)
# nodes to attempt to contact first, before any non-local ones. You
# can use '* replicas' at the end to have it use the number given
# times the number of replicas for the ring being used for the
# request.
# write_affinity_node_count = 2 * replicas
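#
# A worked example of how the affinity settings above interact, assuming a
# hypothetical two-region cluster with a 3-replica ring: with
#   write_affinity = r1
#   write_affinity_node_count = 2 * replicas
# an object PUT will try up to 6 nodes in region 1 before contacting any node
# outside region 1. Reads can be biased toward region 1 in the same spirit
# with, e.g., read_affinity = r1=100 together with sorting_method = affinity.
# The same '* replicas' arithmetic applies to request_node_count: with 3
# replicas, request_node_count = 2 * replicas contacts up to 6 nodes.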

[filter:tempauth]
use = egg:swift#tempauth
# You can override the default log routing for this filter here:
# set log_name = tempauth
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
# multiple auth systems are in use for one Swift cluster.
# reseller_prefix = AUTH
#
# The auth prefix will cause requests beginning with this prefix to be routed
# to the auth subsystem, for granting tokens, etc.
# auth_prefix = /auth/
# token_life = 86400
#
# This allows middleware higher in the WSGI pipeline to override auth
# processing, useful for middleware such as tempurl and formpost. If you know
# you're not going to use such middleware and you want a bit of extra security,
# you can set this to false.
# allow_overrides = true
#
# This specifies what scheme to return with storage urls:
# http, https, or default (chooses based on what the server is running as)
# This can be useful with an SSL load balancer in front of a non-SSL server.
# storage_url_scheme = default
#
# Lastly, you need to list all the accounts/users you want here. The format is:
#   user_<account>_<user> = <key> [group] [group] [...] [storage_url]
# or if you want underscores in <account> or <user>, you can base64 encode them
# (with no equal signs) and use this format:
#   user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
# There are special groups of:
#   .reseller_admin = can do anything to any account for this auth
#   .admin = can do anything within the account
# If neither of these groups are specified, the user can only access containers
# that have been explicitly allowed for them by a .admin or .reseller_admin.
# The trailing optional storage_url allows you to specify an alternate url to
# hand back to the user upon authentication. If not specified, this defaults to
# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
# to what the requester would need to use to reach this host.
# Here are example entries, required for running the tests:
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
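#
# A hedged illustration of the base64 form above, using a hypothetical
# account "test_acct" (which needs encoding because of its underscore) and
# user "tester". Base64 of those strings, with padding stripped, is
# dGVzdF9hY2N0 and dGVzdGVy respectively, so the entry would read:
# user64_dGVzdF9hY2N0_dGVzdGVy = testing .admin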

# To enable Keystone authentication you need to have the auth token
# middleware configured first. An example follows; please refer to the
# keystone documentation for details about the different settings.
#
# You'll also need the keystoneauth middleware enabled and in your main
# pipeline, so instead of having tempauth in there you can change it to:
# authtoken keystoneauth
#
# [filter:authtoken]
# paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
# auth_host = keystonehost
# auth_port = 35357
# auth_protocol = http
# auth_uri = http://keystonehost:5000/
# admin_tenant_name = service
# admin_user = swift
# admin_password = password
# delay_auth_decision = 1
# cache = swift.cache
#
# [filter:keystoneauth]
# use = egg:swift#keystoneauth
# operator_roles lists the roles whose users are allowed to manage a tenant,
# e.g. to create containers or grant ACLs to others.
# operator_roles = admin, swiftoperator

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
# This facility may be used to temporarily remove a Swift node from a load
# balancer pool during maintenance or upgrade (remove the file to allow the
# node back into the load balancer pool).
# disable_path =

[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = cache
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211
#
# Sets how memcache values are serialized and deserialized:
# 0 = older, insecure pickle serialization
# 1 = json serialization but pickles can still be read (still insecure)
# 2 = json serialization only (secure and the default)
# If not set here, the value for memcache_serialization_support will be read
# from /etc/swift/memcache.conf (see memcache.conf-sample).
# To avoid an instant full cache flush, existing installations should
# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
# set to 2 and reload.
# In the future, the ability to use pickle serialization will be removed.
# memcache_serialization_support = 2

[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = ratelimit
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# clock_accuracy should represent how accurate the proxy servers' system
# clocks are with each other. 1000 means that all the proxies' clocks are
# accurate to each other within 1 millisecond. No ratelimit should be higher
# than the clock accuracy.
# clock_accuracy = 1000
#
# max_sleep_time_seconds = 60
#
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
#
# allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
# rate_buffer_seconds = 5
#
# account_ratelimit of 0 means disabled
# account_ratelimit = 0

# these are comma separated lists of account names
# account_whitelist = a,b
# account_blacklist = c,d
#
# with container_ratelimit_x = r
# for containers of size x, limit write requests per second to r. The
# container rate will be linearly interpolated from the values given. With
# the values below, a container of size 5 will get a rate of 75.
# container_ratelimit_0 = 100
# container_ratelimit_10 = 50
# container_ratelimit_50 = 20
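#
# A quick check of the interpolation arithmetic using the sample values
# above: a container of size 5 sits halfway between the size-0 and size-10
# points, so its rate is 100 + (50 - 100) * (5 - 0) / (10 - 0) = 75
# requests per second, as stated.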

[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = domain_remap
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# storage_domain = example.com
# path_root = v1
# reseller_prefixes = AUTH

[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = catch_errors
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log

[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = cname_lookup
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# storage_domain = example.com
# lookup_depth = 1

# Note: Put staticweb just after your auth filter(s) in the pipeline
[filter:staticweb]
use = egg:swift#staticweb

# Note: Put tempurl just before your auth filter(s) in the pipeline
[filter:tempurl]
use = egg:swift#tempurl
# The methods allowed with Temp URLs.
# methods = GET HEAD PUT
#
# The headers to remove from incoming requests. Simply a whitespace delimited
# list of header names; names can optionally end with '*' to indicate a
# prefix match. incoming_allow_headers is a list of exceptions to these
# removals.
# incoming_remove_headers = x-timestamp
#
# The headers allowed as exceptions to incoming_remove_headers. Simply a
# whitespace delimited list of header names; names can optionally end with
# '*' to indicate a prefix match.
# incoming_allow_headers =
#
# The headers to remove from outgoing responses. Simply a whitespace delimited
# list of header names; names can optionally end with '*' to indicate a
# prefix match. outgoing_allow_headers is a list of exceptions to these
# removals.
# outgoing_remove_headers = x-object-meta-*
#
# The headers allowed as exceptions to outgoing_remove_headers. Simply a
# whitespace delimited list of header names; names can optionally end with
# '*' to indicate a prefix match.
# outgoing_allow_headers = x-object-meta-public-*
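#
# A hedged sketch of how clients typically use this filter (the account,
# container, object, and key are all hypothetical): with a secret stored as
# X-Account-Meta-Temp-URL-Key on the account, a temporary GET link is built
# from a hex-encoded HMAC-SHA1 signature over the method, an expiry Unix
# timestamp, and the object path:
#   hmac_body = "GET\n<expires>\n/v1/AUTH_test/container/object"
#   sig = hex(HMAC-SHA1(key, hmac_body))
# and handed out as:
#   https://<proxy>/v1/AUTH_test/container/object?temp_url_sig=<sig>&temp_url_expires=<expires>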

# Note: Put formpost just before your auth filter(s) in the pipeline
[filter:formpost]
use = egg:swift#formpost

# Note: Just needs to be placed before the proxy-server in the pipeline.
[filter:name_check]
use = egg:swift#name_check
# forbidden_chars = '"`<>
# maximum_length = 255
# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$

[filter:list-endpoints]
use = egg:swift#list_endpoints
# list_endpoints_path = /endpoints/

[filter:proxy-logging]
use = egg:swift#proxy_logging
# If not set, logging directives from [DEFAULT] without "access_" will be used
# access_log_name = swift
# access_log_facility = LOG_LOCAL0
# access_log_level = INFO
# access_log_address = /dev/log
#
# If set, access_log_udp_host will override access_log_address
# access_log_udp_host =
# access_log_udp_port = 514
#
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
# access_log_statsd_metric_prefix =
# access_log_headers = false
#
# By default, the X-Auth-Token is logged. To obscure the value,
# set reveal_sensitive_prefix to the number of characters to log.
# For example, if set to 12, only the first 12 characters of the
# token appear in the log. Unauthorized access to the log file then
# won't allow unauthorized usage of the token; however, the first
# 12 or so characters are unique enough that you can trace/debug
# token usage. Set to 0 to suppress the token completely (replaced
# by '...' in the log).
# Note: reveal_sensitive_prefix will not affect the value
# logged with access_log_headers = True.
# reveal_sensitive_prefix = 8192
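#
# A concrete illustration (the token value is hypothetical): with
# reveal_sensitive_prefix = 12, a token such as
# AUTH_tk5c6e9a37c8b34a4dbcc128464ae26acf would appear in the access log as
# just AUTH_tk5c6e9 (its first 12 characters), which is enough to correlate
# requests without exposing a usable credential.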
#
# What HTTP methods are allowed for StatsD logging (comma-sep); request
# methods not in this list will have "BAD_METHOD" for the <method> portion
# of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
#
# Note: The double proxy-logging in the pipeline is not a mistake. The
# left-most proxy-logging is there to log requests that were handled in
# middleware and never made it through to the right-most middleware (and
# proxy server). Double logging is prevented for normal requests. See
# proxy-logging docs.

# Note: Put before both ratelimit and auth in the pipeline.
[filter:bulk]
use = egg:swift#bulk
# max_containers_per_extraction = 10000
# max_failed_extractions = 1000
# max_deletes_per_request = 10000
# yield_frequency = 60

# Note: Put after auth in the pipeline.
[filter:container-quotas]
use = egg:swift#container_quotas

# Note: Put before both ratelimit and auth in the pipeline.
[filter:slo]
use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 2097152
# min_segment_size = 1048576

# Note: Put after auth in the pipeline.
[filter:account-quotas]
use = egg:swift#account_quotas
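#
# A hedged usage sketch (token, proxy endpoint, and quota values are all
# hypothetical): neither quota filter takes options in this file; quotas are
# set per account or per container via metadata headers, e.g. by a reseller
# admin for account quotas:
#   curl -X POST -H "X-Auth-Token: <token>" \
#        -H "X-Account-Meta-Quota-Bytes: 10000000000" \
#        https://<proxy>/v1/AUTH_test
#   curl -X POST -H "X-Auth-Token: <token>" \
#        -H "X-Container-Meta-Quota-Count: 1000" \
#        https://<proxy>/v1/AUTH_test/container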