discourse/config/discourse_defaults.conf

#
# DO NOT EDIT THIS FILE
# If you need to make changes, create a file called discourse.conf in this directory with your changes
# This file is evaluated with ERB when it is loaded
#
# Discourse supports multiple mechanisms for production config.
#
# 1. You can do nothing and get these defaults (not recommended, you should at least set hostname)
# 2. You can copy this file to config/discourse.conf and amend with your settings
# 3. You can pass in config from your environment; all the settings below are available.
# Prepend DISCOURSE_ and upper-case the setting name in ENV. For example,
# to pass in a db_pool of 200 you would set DISCOURSE_DB_POOL=200
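#
# Since this file is run through ERB, a discourse.conf override can also
# compute values at load time. A minimal sketch, assuming your override file
# is ERB-processed the same way (the LOCAL_DB_POOL variable is hypothetical):
#
# db_pool = <%= ENV["LOCAL_DB_POOL"] || 8 %>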
# All settings apply to production only

# connection pool size; sidekiq concurrency is set to 5, allowing an extra 3
# connections for background threads
db_pool = 8
# Database connection timeout in seconds
db_connect_timeout = 5
# socket file used to access db
db_socket =
# host address for db server
# This is left blank so the socket is tried first
db_host =
# host address for db server when taking a backup via `pg_dump`
# Defaults to `db_host` if not configured
db_backup_host =
# port the db server is running on; usually no need to set it
db_port =
# db server port to use when taking a backup via `pg_dump`
db_backup_port =
# name of the database used by discourse
db_name = discourse
# username accessing database
db_username = discourse
# password used to access the db
db_password =
# Disallow prepared statements
# see: https://github.com/rails/rails/issues/21992
db_prepared_statements = false
# host address for db replica server
db_replica_host =
# port running replica db server, defaults to 5432 if not set
db_replica_port =
# use postgres advisory locks (Rails uses these to prevent concurrent migrations)
db_advisory_locks = true
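#
# e.g., a minimal sketch for pointing at a remote postgres server over TCP
# (the hostname and password below are hypothetical placeholders):
#
# db_host = db.internal.example.com
# db_port = 5432
# db_password = some-strong-password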
# hostname running the forum
hostname = "www.example.com"
# backup hostname mainly for cdn use
backup_hostname =
# address of smtp server used to send emails
smtp_address =
# port of smtp server used to send emails
smtp_port = 25
# domain passed to smtp server
smtp_domain =
# username for smtp server
smtp_user_name =
# password for smtp server
smtp_password =
# smtp authentication mechanism
smtp_authentication = plain
# enable TLS encryption for smtp connections
smtp_enable_start_tls = true
# mode for verifying smtp server certificates
# to disable, set to 'none'
smtp_openssl_verify_mode =
# force implicit TLS as per RFC 8314 3.3
smtp_force_tls = false
# number of seconds to wait while attempting to open an SMTP connection
smtp_open_timeout = 5
# number of seconds to wait before timing out an SMTP read(2) call
smtp_read_timeout = 30
# number of seconds to wait while attempting to open an SMTP connection, but
# only when sending emails via group SMTP
group_smtp_open_timeout = 30
# number of seconds to wait before timing out an SMTP read(2) call, but only
# when sending emails via group SMTP
group_smtp_read_timeout = 60
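#
# e.g., a minimal sketch for an external SMTP relay using STARTTLS on the
# submission port (the host and credentials below are hypothetical):
#
# smtp_address = smtp.mailrelay.example.com
# smtp_port = 587
# smtp_domain = example.com
# smtp_user_name = postmaster@example.com
# smtp_password = some-strong-password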
# load MiniProfiler in production, to be used by developers
load_mini_profiler = true
# take a MiniProfiler snapshot once every N requests
# the default of 0 means never
mini_profiler_snapshots_period = 0
# specify the URL of the destination that MiniProfiler should ship snapshots to
# mini_profiler_snapshots_transport_auth_key is required as well
mini_profiler_snapshots_transport_url =
# authorization key that will be included as a header in requests made by the
# snapshots transporter to the URL specified above. The destination should
# know this key and only accept requests that have this key in the
# `Mini-Profiler-Transport-Auth` header.
mini_profiler_snapshots_transport_auth_key =
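#
# e.g., to snapshot roughly one request in every 100 and ship the results to
# a collector (the URL and key below are hypothetical):
#
# mini_profiler_snapshots_period = 100
# mini_profiler_snapshots_transport_url = https://collector.example.com/snapshots
# mini_profiler_snapshots_transport_auth_key = some-shared-secret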
# recommended: CDN used to access assets
cdn_url =
# The hostname used by the CDN to request assets
cdn_origin_hostname =
# comma delimited list of emails that have developer level access
developer_emails =
# redis server address
redis_host = localhost
# redis server port
redis_port = 6379
# redis replica server address
redis_replica_host =
# redis replica server port
redis_replica_port = 6379
# redis database
redis_db = 0
# redis password
redis_password =
# skip setting the client id, for cloud providers that do not support redis CLIENT commands
redis_skip_client_commands = false
# uses SSL for all Redis connections if true
redis_use_ssl = false
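#
# e.g., a minimal sketch for a managed redis instance reached over TLS
# (the hostname and password below are hypothetical):
#
# redis_host = redis.internal.example.com
# redis_password = some-strong-password
# redis_use_ssl = true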
# whether to use a separate redis server for the message bus
message_bus_redis_enabled = false
# message bus redis server address
message_bus_redis_host = localhost
# message bus redis server port
message_bus_redis_port = 6379
# message bus redis replica server address
message_bus_redis_replica_host =
# message bus redis replica server port
message_bus_redis_replica_port = 6379
# message bus redis database
message_bus_redis_db = 0
# message bus redis password
message_bus_redis_password =
# skip setting the client id, for cloud providers that do not support redis CLIENT commands
message_bus_redis_skip_client_commands = false
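#
# e.g., a minimal sketch for routing message bus traffic to its own redis
# (the hostname below is hypothetical):
#
# message_bus_redis_enabled = true
# message_bus_redis_host = redis-bus.internal.example.com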
# enable Cross-origin Resource Sharing (CORS) directly at the application level
enable_cors = false
cors_origin = ''
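#
# e.g., to let a hypothetical embedding site call the API from the browser:
#
# enable_cors = true
# cors_origin = 'https://embed.example.com'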
# enable if you really need to serve assets in prod
serve_static_assets = false
# number of sidekiq workers (launched via unicorn master)
sidekiq_workers = 5
# Logs Sidekiq jobs that have been running for longer than the configured number of minutes to the Rails log
sidekiq_report_long_running_jobs_minutes =
# connection reaping helps keep connection counts down; postgres
# will not work properly with huge numbers of open connections
# reap connections from the pool that are older than 30 seconds
connection_reaper_age = 30
# run reap check every 30 seconds
connection_reaper_interval = 30
# set to relative URL (for subdirectory/subfolder hosting)
# IMPORTANT: path must not include a trailing /
# EG: /forum
relative_url_root =
# increasing this number will increase redis memory use
# this caps the backlog (the ability of channels to catch up)
# the message bus default cap is 1000; we wind it down to 100
message_bus_max_backlog_size = 100
# how often the message-bus backlog should be cleared
# lower values will make memory usage more consistent, but will
# increase redis CPU demands
message_bus_clear_every = 50
# must be a 64 byte hex string, anything else will be ignored with a warning
secret_key_base =
# fallback path for all assets which are served via the application
# used by static_controller
# in multi-host setups this allows you to have old unicorn instances serve
# newly compiled assets
fallback_assets_path =
# S3 settings used for serving ALL public files
# be sure to configure a CDN as well per cdn_url
s3_bucket =
s3_region =
s3_access_key_id =
s3_secret_access_key =
s3_use_iam_profile =
s3_cdn_url =
s3_endpoint =
s3_http_continue_timeout =
s3_install_cors_rule =
enable_s3_transfer_acceleration =
# Optionally, specify a separate CDN to be used for static JS assets stored on S3
s3_asset_cdn_url =
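#
# e.g., a minimal sketch for serving uploads from S3 with a CDN in front
# (the bucket name and CDN URL below are hypothetical):
#
# s3_bucket = my-discourse-uploads
# s3_region = us-east-1
# s3_use_iam_profile = true
# s3_cdn_url = https://uploads-cdn.example.com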
### rate limits apply to all sites
max_user_api_reqs_per_minute = 20
max_user_api_reqs_per_day = 2880
max_admin_api_reqs_per_minute = 60
max_reqs_per_ip_per_minute = 200
max_reqs_per_ip_per_10_seconds = 50
# applies to asset type routes (avatars/css and so on)
max_asset_reqs_per_ip_per_10_seconds = 200
# what the global rate limiter does when the limit is exceeded; can be warn+block, warn, block or none
max_reqs_per_ip_mode = block
# when false, any IP resolved as a private IP bypasses rate limiting
max_reqs_rate_limit_on_private = false
# use per-user rate limits instead of per-IP rate limits for users at or above this trust level
skip_per_ip_rate_limit_trust_level = 1
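#
# e.g., to apply per-user limits only from trust level 2 upwards, set this to
# 2 here or export DISCOURSE_SKIP_PER_IP_RATE_LIMIT_TRUST_LEVEL=2 in the
# environment:
#
# skip_per_ip_rate_limit_trust_level = 2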
# logged-in DoS protection
# protection will only trigger for requests that queue longer than this amount
force_anonymous_min_queue_seconds = 1
# only force anonymous if we see more than N requests for this path in the last 10 seconds
force_anonymous_min_per_10_seconds = 3
# Any request with the header Discourse-Background: true will not be allowed to queue
# longer than this amount of time.
# Discourse will rate limit and ask client to try again later.
background_requests_max_queue_length = 0.5
# if a message bus request queues for 100ms or longer, we will reject it and
# ask the consumer to back off
reject_message_bus_queue_seconds = 0.1
# disable search if app server is queueing for longer than this (in seconds)
disable_search_queue_threshold = 1
# maximum number of posts rebaked across the cluster in the periodical job
# the rebake process is very expensive; on multisite we have to make sure we
# never flood the queue
max_old_rebakes_per_15_minutes = 300
# maximum number of log messages in /logs
max_logster_logs = 1000
# during precompile, update the MaxMind database if it is older than N days
# set to 0 to disable
refresh_maxmind_db_during_precompile_days = 2
# backup path containing maxmind db files
maxmind_backup_path =
# register an account at: https://www.maxmind.com/en/geolite2/signup
# then head to your profile and get your account ID and license key
maxmind_account_id =
maxmind_license_key =
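#
# e.g. (the credentials below are hypothetical):
#
# maxmind_account_id = 123456
# maxmind_license_key = some-license-key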
# Configures a URL mirror to download the MaxMind databases from.
# When set, the file path will be appended to the mirror's URL.
# If the mirror URL is https://some.url.com/maxmind/mirror for example, the
# GeoLite2-City database file will be downloaded from https://some.url.com/maxmind/mirror/GeoLite2-City.tar.gz
maxmind_mirror_url =
# when enabled, the following headers will be added to every response
# (note: if measurements do not exist for a header, it will be omitted)
#
# X-Redis-Calls: 10
# X-Redis-Time: 1.02
# X-Sql-Calls: 102
# X-Sql-Time: 1.02
# X-Queue-Time: 1.01
enable_performance_http_headers = false
# gather JavaScript errors from clients (rate limited to 1 error per IP per minute)
enable_js_error_reporting = true
# This is probably not a number you want to touch; it controls the number of
# workers we allow mini scheduler to run. Prior to 2019 we ran a single worker;
# on extremely busy setups this could lead to situations where regular jobs would
# starve, specifically jobs such as "run heartbeat", which keeps sidekiq running.
# Having a high number here is very low risk: regular jobs are limited in scope and scale.
mini_scheduler_workers = 5
# enable compression on anonymous cache redis entries
# this slightly increases the cost of storing cache entries but can make it much
# cheaper to retrieve cache entries when redis is hosted on a different machine
# to the one running the web server
compress_anon_cache = false
# Only store entries in redis for anonymous cache if they are observed more than N times
# for a specific key
#
# This ensures there are no pathological cases where we keep storing data in the anonymous
# cache but never use it; set to 1 to store immediately, or 0 to disable the anon cache
anon_cache_store_threshold = 2
# EXPERIMENTAL - not yet supported in production
# by default admins can install and amend any theme
# you may restrict it so only specific themes are approved
# in allowlist mode all theme updates must happen via git repos
# themes missing from the list are automatically disallowed
# list is a comma-separated list of git repos, e.g.:
# https://github.com/discourse/discourse-custom-header-links.git,https://github.com/discourse/discourse-simple-theme.git
allowed_theme_repos =
# Demon::EmailSync is used in conjunction with the enable_imap site setting
# to sync N IMAP mailboxes with specific groups. It is a process started in
# unicorn.conf, and it spawns N threads (one for each multisite connection) and,
# for each database, spawns another N threads (one for each configured group).
#
# We want this off by default so the process is not started when it does not
# need to be (e.g. development, test, certain hosting tiers)
enable_email_sync_demon = false
# we never want to queue more than 10000 digests per 30-minute block, as
# doing so can easily block sidekiq
# on multisites we recommend a far lower number
max_digests_enqueued_per_30_mins_per_site = 10000
# This cluster name can be passed to the /srv/status route to verify
# the application cluster is the same one you are expecting
cluster_name =
# The YAML file used to configure multisite clusters
multisite_config_path = config/multisite.yml
# If false, only short (regular) polling will be attempted
enable_long_polling =
# Length of time to hold open a long polling connection in milliseconds
long_polling_interval =
# Specify the mode for the early hint header. Can be nil (disabled), "preconnect" (lists just CDN domains) or "preload" (lists all assets).
# The 'preload' mode currently serves inconsistent headers for different pages/users, and is not recommended for production use.
early_hint_header_mode =
# Specify which header name to use for the early hint. Defaults to "Link", but can be changed to support different proxy mechanisms.
early_hint_header_name = "Link"
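#
# e.g., to emit preconnect hints through a proxy that rewrites a custom header
# (the header name below is hypothetical):
#
# early_hint_header_mode = "preconnect"
# early_hint_header_name = "X-Custom-Early-Hint"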
# When using an external upload store, redirect `user_avatar` requests instead of proxying
redirect_avatar_requests = false
# Force the entire cluster into postgres readonly mode. Equivalent to running `Discourse.enable_pg_force_readonly_mode`
pg_force_readonly_mode = false
# default DNS query timeout for FinalDestination (used when not explicitly given programmatically)
dns_query_timeout_secs =
# Default global regex timeout
regex_timeout_seconds = 2
# Allow admins on the cluster to use the impersonation function
allow_impersonation = true
# The maximum number of characters allowed in a single log line.
log_line_max_chars = 160000
# this value is included when generating static asset URLs.
# Updating the value allows site operators to invalidate all asset URLs and
# recover from configuration issues which may have been cached by CDNs/browsers.
asset_url_salt =