Mirror of https://github.com/discourse/discourse.git, synced 2024-11-22 08:57:10 -06:00

b4bfc27b19

We have tested rate limiting in block mode, including for admin accounts, for close to 12 months now on meta.discourse.org. This has resulted in no degradation of service, even for admin accounts that request a lot of information from the site. The default of 200 requests per minute and 50 per 10 seconds is very generous; it simply protects against very aggressive clients. The behaviour can be disabled or tweaked using DISCOURSE_MAX_REQS_PER_IP_MODE and related settings. The only big downside is when a very large number of users all come from a single IP: for example, sites accessing Discourse from an internal network that shares one IP via NAT, or a Discourse that cannot resolve the IP addresses of users due to proxy misconfiguration.
#
# DO NOT EDIT THIS FILE
# If you need to make changes create a file called discourse.conf in this directory with your changes
# On import this file will be imported using ERB
#

# Discourse supports multiple mechanisms for production config.
#
# 1. You can do nothing and get these defaults (not recommended, you should at least set hostname)
# 2. You can copy this file to config/discourse.conf and amend with your settings
# 3. You can pass in config from your environment, all the settings below are available.
#    Append DISCOURSE_ and upper case the setting in ENV. For example:
#    to pass in db_timeout of 200 you would use DISCOURSE_DB_TIMEOUT=200

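# As an illustration only (the values below are hypothetical, not shipped
# defaults), a minimal config/discourse.conf override could look like:
#
#   hostname = "forum.example.com"
#   db_pool = 12
#
# and the same overrides expressed through the environment would be:
#
#   DISCOURSE_HOSTNAME=forum.example.com
#   DISCOURSE_DB_POOL=12
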
# All settings apply to production only

# connection pool size, sidekiq is set to 5, allowing an extra 3 for bg threads
db_pool = 8

# ActiveRecord connection pool timeout in milliseconds
db_timeout = 5000

# Database connection timeout in seconds
db_connect_timeout = 5

# socket file used to access db
db_socket =

# host address for db server
# This is set to blank so it tries to use sockets first
db_host =

# host address for db server when taking a backup via `pg_dump`
# Defaults to `db_host` if not configured
db_backup_host =

# port running db server, no need to set it
db_port =

# db server port to use when taking a backup via `pg_dump`
db_backup_port = 5432

# database name running discourse
db_name = discourse

# username accessing database
db_username = discourse

# password used to access the db
db_password =

# Disallow prepared statements
# see: https://github.com/rails/rails/issues/21992
db_prepared_statements = false

# host address for db replica server
db_replica_host =

# port running replica db server, defaults to 5432 if not set
db_replica_port =

# hostname running the forum
hostname = "www.example.com"

# backup hostname mainly for cdn use
backup_hostname =

# address of smtp server used to send emails
smtp_address =

# port of smtp server used to send emails
smtp_port = 25

# domain passed to smtp server
smtp_domain =

# username for smtp server
smtp_user_name =

# password for smtp server
smtp_password =

# smtp authentication mechanism
smtp_authentication = plain

# enable TLS encryption for smtp connections
smtp_enable_start_tls = true

# mode for verifying smtp server certificates
# to disable, set to 'none'
smtp_openssl_verify_mode =

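# Sketch of an outbound mail setup, assuming a hypothetical relay at
# smtp.example.com that accepts STARTTLS on port 587 (adjust to your provider):
#
#   smtp_address = smtp.example.com
#   smtp_port = 587
#   smtp_domain = example.com
#   smtp_user_name = postmaster@example.com
#   smtp_password = change-me
#   smtp_enable_start_tls = true
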
# load MiniProfiler in production, to be used by developers
load_mini_profiler = true

# recommended, cdn used to access assets
cdn_url =

# comma delimited list of emails that have developer level access
developer_emails =

# redis server address
redis_host = localhost

# redis server port
redis_port = 6379

# redis slave server address
redis_slave_host =

# redis slave server port
redis_slave_port = 6379

# redis database
redis_db = 0

# redis password
redis_password =

# skip configuring client id for cloud providers that do not support client commands
redis_skip_client_commands = false

# message bus redis server switch
message_bus_redis_enabled = false

# message bus redis server address
message_bus_redis_host = localhost

# message bus redis server port
message_bus_redis_port = 6379

# message bus redis slave server address
message_bus_redis_slave_host =

# message bus redis slave server port
message_bus_redis_slave_port = 6379

# message bus redis database
message_bus_redis_db = 0

# message bus redis password
message_bus_redis_password =

# skip configuring client id for cloud providers that do not support client commands
message_bus_redis_skip_client_commands = false

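# Illustrative only: to move the message bus onto a dedicated redis instance
# (the hostname below is hypothetical), you could set:
#
#   message_bus_redis_enabled = true
#   message_bus_redis_host = redis-bus.internal.example.com
#   message_bus_redis_port = 6379
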
# enable Cross-origin Resource Sharing (CORS) directly at the application level
enable_cors = false
cors_origin = ''

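# Illustrative only: to allow cross-origin requests from one hypothetical
# origin, you might set:
#
#   enable_cors = true
#   cors_origin = 'https://forum.example.com'
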
# enable if you really need to serve assets in production
serve_static_assets = false

# number of sidekiq workers (launched via unicorn master)
sidekiq_workers = 5

# adjust stylesheets to rtl (requires "rtlit" gem)
rtl_css = false

# notify admin when a new version of discourse is released
# this is global so it is easier to set in multisites
# TODO allow for global overrides
new_version_emails = true

# connection reaping helps keep connection counts down, postgres
# will not work properly with huge numbers of open connections
# reap connections from pool that are older than 30 seconds
connection_reaper_age = 30

# run reap check every 30 seconds
connection_reaper_interval = 30

# set to relative URL (for subdirectory hosting)
# IMPORTANT: path must not include a trailing /
# EG: /forum
relative_url_root =

# increasing this number will increase redis memory use
# this ensures the backlog (the ability of channels to catch up) is capped
# message bus default cap is 1000, we are winding it down to 100
message_bus_max_backlog_size = 100

# must be a 64 byte hex string, anything else will be ignored with a warning
secret_key_base =

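# Assuming "64 byte hex string" means 64 random bytes hex encoded (128 hex
# characters), a suitable value could be generated with, for example:
#
#   openssl rand -hex 64
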
# fallback path for all assets which are served via the application
# used by static_controller
# in multi host setups this allows you to have old unicorn instances serve
# newly compiled assets
fallback_assets_path =

# S3 settings used for serving ALL public files
# be sure to configure a CDN as well per cdn_url
s3_bucket =
s3_region =
s3_access_key_id =
s3_secret_access_key =
s3_use_iam_profile =
s3_cdn_url =
s3_endpoint =

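# Sketch of an S3-backed setup with placeholder values (bucket, region and CDN
# below are hypothetical; the access key pair can be left blank when
# s3_use_iam_profile is enabled):
#
#   s3_bucket = my-discourse-files
#   s3_region = us-east-1
#   s3_use_iam_profile = true
#   s3_cdn_url = https://files-cdn.example.com
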
### rate limits apply to all sites
max_user_api_reqs_per_minute = 20
max_user_api_reqs_per_day = 2880

max_admin_api_reqs_per_key_per_minute = 60

max_reqs_per_ip_per_minute = 200
max_reqs_per_ip_per_10_seconds = 50

# applies to asset type routes (avatars/css and so on)
max_asset_reqs_per_ip_per_10_seconds = 200

# how the global rate limiter responds when a limit is exceeded,
# can be warn+block, warn, block or none
max_reqs_per_ip_mode = block

# bypass rate limiting for any IP resolved as a private IP
max_reqs_rate_limit_on_private = false

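# To relax or disable the global limiter on setups where many users legitimately
# share one IP (NAT, or a proxy that hides client IPs), the mode above can be
# overridden, for example:
#
#   max_reqs_per_ip_mode = none
#
# or, via the environment, DISCOURSE_MAX_REQS_PER_IP_MODE=none
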
# logged in DoS protection

# protection will only trigger for requests that queue longer than this amount (in seconds)
force_anonymous_min_queue_seconds = 1
# only trigger anon if we see more than N requests for this path in the last 10 seconds
force_anonymous_min_per_10_seconds = 3

# if a message bus request queues for 100ms or longer, we will reject it and ask the consumer
# to back off
reject_message_bus_queue_seconds = 0.1

# disable search if app server is queueing for longer than this (in seconds)
disable_search_queue_threshold = 1

# maximum number of posts rebaked across the cluster in the periodic job
# the rebake process is very expensive, on multisite we have to make sure we never
# flood the queue
max_old_rebakes_per_15_minutes = 300

# maximum number of log messages in /logs
max_logster_logs = 1000

# during precompile update maxmind database if older than N days
# set to 0 to disable
refresh_maxmind_db_during_precompile_days = 2

# backup path containing maxmind db files
maxmind_backup_path =

# when enabled the following headers will be added to every response:
# (note, if measurements do not exist for the header they will be omitted)
#
# X-Redis-Calls: 10
# X-Redis-Time: 1.02
# X-Sql-Calls: 102
# X-Sql-Time: 1.02
# X-Queue-Time: 1.01
enable_performance_http_headers = false

# gather JavaScript errors from clients (rate limited to 1 error per IP per minute)
enable_js_error_reporting = true

# This is probably not a number you want to touch, it controls the number of workers
# we allow mini scheduler to run. Prior to 2019 we ran a single worker.
# On extremely busy setups this could lead to situations where regular jobs would
# starve. Specifically jobs such as "run heartbeat" which keeps sidekiq running.
# Having a high number here is very low risk. Regular jobs are limited in scope and scale.
mini_scheduler_workers = 5

# enable compression on anonymous cache redis entries
# this slightly increases the cost of storing cache entries but can make it much
# cheaper to retrieve cache entries when redis is on a different machine to the one
# running the web server
compress_anon_cache = false

# Only store entries in redis for anonymous cache if they are observed more than N times
# for a specific key
#
# This ensures there are no pathological cases where we keep storing data in the anonymous
# cache but never use it. Set to 1 to store immediately, set to 0 to disable the anon cache.
anon_cache_store_threshold = 2