[DEFAULT]
# bind_ip = 0.0.0.0
# bind_port = 80
# bind_timeout = 30
# backlog = 4096
# swift_dir = /etc/swift
# workers = 1
# user = swift
# Set the following two lines to enable SSL. This is for testing only.
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
# expiring_objects_container_divisor = 86400
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = False
# log_address = /dev/log
# Comma-separated list of functions to call to set up custom log handlers.
# Each function gets passed: conf, name, log_to_console, log_route, fmt,
# logger, adapted_logger
# log_custom_handlers =
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
# Use a comma-separated list of full URLs (http://foo.bar:1234,https://foo.bar)
# cors_allow_origin =
# eventlet_debug = false
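# As an illustration only (the address, port, and worker count below are
# hypothetical values, not recommendations), a typical way to override the
# defaults above is:
# bind_ip = 10.0.0.5
# bind_port = 8080
# workers = 8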

[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache slo ratelimit tempauth authtoken keystoneauth container-quotas account-quotas proxy-logging proxy-server

[app:proxy-server]
use = egg:swift#proxy
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_address = /dev/log
# log_handoffs = True
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 8192
# client_chunk_size = 8192
# node_timeout = 10
# client_timeout = 60
# conn_timeout = 0.5
# How long without an error before a node's error count is reset. This will
# also be how long before a node is re-enabled after suppression is triggered.
# error_suppression_interval = 60
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
# If set to 'true' any authorized user may create and delete accounts; if
# 'false' no one, even authorized, can.
# allow_account_management = false
# Set object_post_as_copy = false to turn on fast posts where only the metadata
# changes are stored anew and the original data file is kept in place. This
# makes for quicker posts; but since the container metadata isn't updated in
# this mode, features like container sync won't be able to sync posts.
# object_post_as_copy = true
# If set to 'true' authorized accounts that do not yet exist within the Swift
# cluster will be automatically created.
account_autocreate = true
# If set to a positive value, trying to create a container when the account
# already holds at least that many containers will result in a 403 Forbidden.
# Note: This is a soft limit, meaning a user might exceed the cap for up to
# recheck_account_existence seconds before the 403s kick in.
# max_containers_per_account = 0
# This is a comma-separated list of account hashes that ignore the
# max_containers_per_account cap.
# max_containers_whitelist =
# Comma-separated list of Host headers to which the proxy will deny requests.
# deny_host_headers =
# Prefix used when automatically creating accounts.
# auto_create_account_prefix = .
# Depth of the proxy put queue.
# put_queue_depth = 10
# Start rate-limiting object segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second.
# rate_limit_segments_per_sec = 1
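# For illustration only (hypothetical values): with the defaults above, the
# 11th and later segments of a segmented object are served at roughly one
# segment per second. To start throttling later and allow a faster trickle,
# you could instead set, for example:
# rate_limit_after_segment = 30
# rate_limit_segments_per_sec = 3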
# Storage nodes can be chosen at random (shuffle) or by using timing
# measurements. Using timing measurements may allow for lower overall latency.
# The valid values for sorting_method are "shuffle" and "timing"
# sorting_method = shuffle
# If the timing sorting_method is used, the timings will only be valid for
# the number of seconds configured by timing_expiry.
# timing_expiry = 300
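# A minimal sketch of switching to timing-based node selection (the
# timing_expiry shown is just the documented default):
# sorting_method = timing
# timing_expiry = 300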
# If set to false, GETs of objects with the X-Static-Large-Object header set
# will treat them as regular objects, i.e. the manifest object's own contents
# will be returned. This should be set to false if slo is not used in the
# pipeline.
# allow_static_large_object = true

[filter:tempauth]
use = egg:swift#tempauth
# You can override the default log routing for this filter here:
# set log_name = tempauth
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# set log_address = /dev/log
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
# multiple auth systems are in use for one Swift cluster.
# reseller_prefix = AUTH
# The auth prefix will cause requests beginning with this prefix to be routed
# to the auth subsystem, for granting tokens, etc.
# auth_prefix = /auth/
# token_life = 86400
# This allows middleware higher in the WSGI pipeline to override auth
# processing, useful for middleware such as tempurl and formpost. If you know
# you're not going to use such middleware and you want a bit of extra security,
# you can set this to false.
# allow_overrides = true
# This specifies what scheme to return with storage URLs:
# http, https, or default (chooses based on what the server is running as)
# This can be useful with an SSL load balancer in front of a non-SSL server.
# storage_url_scheme = default
# Lastly, you need to list all the accounts/users you want here. The format is:
# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
# or if you want underscores in <account> or <user>, you can base64 encode them
# (with no equal signs) and use this format:
# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
# There are special groups of:
# .reseller_admin = can do anything to any account for this auth
# .admin = can do anything within the account
# If neither of these groups is specified, the user can only access containers
# that have been explicitly allowed for them by a .admin or .reseller_admin.
# The trailing optional storage_url allows you to specify an alternate URL to
# hand back to the user upon authentication. If not specified, this defaults to
# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
# to what the requester would need to use to reach this host.
# Here are example entries, required for running the tests:
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
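# For illustration only: a hypothetical account "test_acct" and user
# "test_user" contain underscores, so they must be base64-encoded with the
# trailing '=' padding dropped ("test_acct" -> dGVzdF9hY2N0,
# "test_user" -> dGVzdF91c2Vy), giving an entry such as:
# user64_dGVzdF9hY2N0_dGVzdF91c2Vy = testing .admin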

# To enable Keystone authentication, the auth token middleware must be
# configured first. An example is shown below; please refer to the Keystone
# documentation for details about the different settings.
#
# You'll also need the keystoneauth middleware enabled and in your main
# pipeline, so instead of having tempauth in there you can change it to:
# authtoken keystoneauth
#
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
delay_auth_decision = 1
cache = swift.cache
include_service_catalog = False
signing_dir = /var/lib/swift/keystone-signing

[filter:keystoneauth]
use = egg:swift#keystoneauth
# operator_roles lists the roles whose members are allowed to manage a
# tenant's account: create containers, set ACLs for others, and so on.
# operator_roles = admin, swiftoperator

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path; if the file exists, the healthcheck URL will
# return "503 Service Unavailable" with a body of "DISABLED BY FILE". This
# facility may be used to temporarily remove a Swift node from a load
# balancer pool during maintenance or upgrade (remove the file to allow the
# node back into the load balancer pool).
# disable_path =

[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = cache
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# set log_address = /dev/log
# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211
#
# Sets how memcache values are serialized and deserialized:
# 0 = older, insecure pickle serialization
# 1 = json serialization but pickles can still be read (still insecure)
# 2 = json serialization only (secure and the default)
# If not set here, the value for memcache_serialization_support will be read
# from /etc/swift/memcache.conf (see memcache.conf-sample).
# To avoid an instant full cache flush, existing installations should
# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
# set to 2 and reload.
# In the future, the ability to use pickle serialization will be removed.
# memcache_serialization_support = 2

[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = ratelimit
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# set log_address = /dev/log
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clocks are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
# clock accuracy.
# clock_accuracy = 1000
# max_sleep_time_seconds = 60
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
# Allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
# rate_buffer_seconds = 5
# account_ratelimit of 0 means disabled
# account_ratelimit = 0
# these are comma separated lists of account names
# account_whitelist = a,b
# account_blacklist = c,d
# With container_ratelimit_x = r, containers of size x are limited to r
# requests per second. The container rate will be linearly interpolated from
# the values given. With the values below, a container of size 5 will get a
# rate of 75.
# container_ratelimit_0 = 100
# container_ratelimit_10 = 50
# container_ratelimit_50 = 20
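# A worked example of the interpolation above: a container of size 5 falls
# between container_ratelimit_0 = 100 and container_ratelimit_10 = 50, so its
# limit is 100 + (50 - 100) * (5 - 0) / (10 - 0) = 75 requests per second.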

[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = domain_remap
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# set log_address = /dev/log
# storage_domain = example.com
# path_root = v1
# reseller_prefixes = AUTH
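# A rough sketch of the remapping (assuming the defaults above and a
# hypothetical account AUTH_test): a request for
#   container.AUTH_test.example.com/object
# would be remapped to
#   /v1/AUTH_test/container/object
# before the proxy server handles it.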

[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = catch_errors
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# set log_address = /dev/log

[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = cname_lookup
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# set log_address = /dev/log
# storage_domain = example.com
# lookup_depth = 1

# Note: Put staticweb just after your auth filter(s) in the pipeline
[filter:staticweb]
use = egg:swift#staticweb
# Seconds to cache container x-container-meta-web-* header values.
# cache_timeout = 300

# Note: Put tempurl just before your auth filter(s) in the pipeline
[filter:tempurl]
use = egg:swift#tempurl
#
# The headers to remove from incoming requests: a whitespace-delimited list of
# header names; names can optionally end with '*' to indicate a prefix match.
# incoming_allow_headers is a list of exceptions to these removals.
# incoming_remove_headers = x-timestamp
#
# The headers allowed as exceptions to incoming_remove_headers: a
# whitespace-delimited list of header names; names can optionally end with
# '*' to indicate a prefix match.
# incoming_allow_headers =
#
# The headers to remove from outgoing responses: a whitespace-delimited list of
# header names; names can optionally end with '*' to indicate a prefix match.
# outgoing_allow_headers is a list of exceptions to these removals.
# outgoing_remove_headers = x-object-meta-*
#
# The headers allowed as exceptions to outgoing_remove_headers: a
# whitespace-delimited list of header names; names can optionally end with
# '*' to indicate a prefix match.
# outgoing_allow_headers = x-object-meta-public-*
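# For illustration only (the x-remove-* header names are hypothetical): to
# strip all x-remove-* headers from incoming requests while still letting
# x-remove-keep through, you could set:
# incoming_remove_headers = x-timestamp x-remove-*
# incoming_allow_headers = x-remove-keep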

# Note: Put formpost just before your auth filter(s) in the pipeline
[filter:formpost]
use = egg:swift#formpost

# Note: Just needs to be placed before the proxy-server in the pipeline.
[filter:name_check]
use = egg:swift#name_check
# forbidden_chars = '"`<>
# maximum_length = 255
# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
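# For illustration, the default forbidden_regexp above rejects paths that
# contain a '/./' or '/../' segment or that end in '/.' or '/..'.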

[filter:list-endpoints]
use = egg:swift#list_endpoints
# list_endpoints_path = /endpoints/

[filter:proxy-logging]
use = egg:swift#proxy_logging
# If not set, the logging directives from [DEFAULT] (without the "access_"
# prefix) will be used
# access_log_name = swift
# access_log_facility = LOG_LOCAL0
# access_log_level = INFO
# access_log_address = /dev/log
# If set, access_log_udp_host will override access_log_address
# access_log_udp_host =
# access_log_udp_port = 514
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
# access_log_statsd_metric_prefix =
# access_log_headers = False
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
# Note: The double proxy-logging in the pipeline is not a mistake. The
# left-most proxy-logging is there to log requests that were handled in
# middleware and never made it through to the right-most middleware (and
# proxy server). Double logging is prevented for normal requests. See
# proxy-logging docs.

# Note: Put before both ratelimit and auth in the pipeline.
[filter:bulk]
use = egg:swift#bulk
# max_containers_per_extraction = 10000
# max_failed_files = 1000
# max_deletes_per_request = 1000

# Note: Put after auth in the pipeline.
[filter:container-quotas]
use = egg:swift#container_quotas

# Note: Put before both ratelimit and auth in the pipeline.
[filter:slo]
use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 2097152
# min_segment_size = 1048576
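# For reference (derived from the byte values above): max_manifest_size of
# 2097152 bytes is 2 MiB and min_segment_size of 1048576 bytes is 1 MiB.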

[filter:account-quotas]
use = egg:swift#account_quotas