# Fediversity/matrix/nginx/workers/upstreams.conf

# Stream workers first, these are special; the Synapse documentation says
# "each stream can only have a single writer". See the homeserver.yaml
# sketch after these blocks for the matching stream_writers entries.
# Account data
upstream account_data {
    server unix:/run/matrix-synapse/inbound_accountdata.sock max_fails=0;
    keepalive 10;
}
# User directory
upstream userdir {
    server unix:/run/matrix-synapse/inbound_userdir.sock max_fails=0;
    keepalive 10;
}
# Typing
upstream typing {
    server unix:/run/matrix-synapse/inbound_typing.sock max_fails=0;
    keepalive 10;
}
# To-device
upstream todevice {
    server unix:/run/matrix-synapse/inbound_todevice.sock max_fails=0;
    keepalive 10;
}
# Receipts
upstream receipts {
    server unix:/run/matrix-synapse/inbound_receipts.sock max_fails=0;
    keepalive 10;
}
# Presence
upstream presence {
    server unix:/run/matrix-synapse/inbound_presence.sock max_fails=0;
    keepalive 10;
}
# Push rules
upstream push_rules {
    server unix:/run/matrix-synapse/inbound_push_rules.sock max_fails=0;
    keepalive 10;
}
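# For reference: the stream upstreams above assume matching stream_writers
# entries in homeserver.yaml, along these lines (the worker names are
# illustrative, not taken from this repo):
#
#   stream_writers:
#     account_data: account_data_writer
#     typing: typing_writer
#     to_device: todevice_writer
#     receipts: receipts_writer
#     presence: presence_writer
#     push_rules: push_rules_writer
#
# The userdir worker is not a stream writer; it is selected in
# homeserver.yaml via update_user_directory_from_worker instead.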
# End of the stream workers; the workers below are of the "normal" type.
# Media
# If more than one media worker is used, they *must* all run on the same machine.
upstream media {
    server unix:/run/matrix-synapse/inbound_mediaworker.sock max_fails=0;
    keepalive 10;
}
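# Scaling media out would mean adding more sockets here, e.g. (hypothetical
# socket name, and remember: same machine only):
#
#   server unix:/run/matrix-synapse/inbound_mediaworker2.sock max_fails=0;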
# Synchronisation by clients:
# Normal sync. Not particularly heavy, but it happens a lot.
upstream normal_sync {
    # Use the username mapper result as the hash key
    hash $mxid_localpart consistent;
    server unix:/run/matrix-synapse/inbound_normal_sync1.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_normal_sync2.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_normal_sync3.sock max_fails=0;
    keepalive 10;
}
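# The hash directives in these sync/client pools rely on $mxid_localpart
# being set at the http level by the username mapper before these upstreams
# are hit. A minimal njs-based sketch (file and function names are
# hypothetical):
#
#   js_import mapper from njs/matrix_mapper.js;
#   js_set $mxid_localpart mapper.localpart;
#
# "hash ... consistent" enables ketama consistent hashing, so a given user
# keeps landing on the same sync worker (warm caches), and adding or
# removing a worker only remaps a small share of users.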
# Initial sync
# Much heavier than a normal sync, but happens less often.
upstream initial_sync {
    # Use the username mapper result as the hash key
    hash $mxid_localpart consistent;
    server unix:/run/matrix-synapse/inbound_initial_sync1.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_initial_sync2.sock max_fails=0;
    keepalive 10;
}
# Login
upstream login {
    server unix:/run/matrix-synapse/inbound_login.sock max_fails=0;
    keepalive 10;
}
# Clients
upstream client {
    hash $mxid_localpart consistent;
    server unix:/run/matrix-synapse/inbound_clientworker1.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_clientworker2.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_clientworker3.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_clientworker4.sock max_fails=0;
    keepalive 10;
}
# Federation
# "Normal" federation, balanced round-robin over 4 workers.
upstream incoming_federation {
    server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0;
    keepalive 10;
}
# Inbound federation requests need to be balanced by IP address, but can go
# to the same pool of workers as the other federation traffic.
upstream federation_requests {
    hash $remote_addr consistent;
    server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0;
    server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0;
    keepalive 10;
}
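# Note that both federation pools point at the same four reader sockets;
# only the balancing method differs. incoming_federation round-robins
# ordinary federation traffic, while federation_requests pins each remote
# server's IP to one worker for the endpoints that need that affinity.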
# The main Synapse process handles everything else.
upstream inbound_main {
    server unix:/run/matrix-synapse/inbound_main.sock max_fails=0;
    keepalive 10;
}
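# For the "keepalive" directives above to take effect, the proxy_pass
# locations that use these upstreams must speak HTTP/1.1 with the
# Connection header cleared:
#
#   proxy_http_version 1.1;
#   proxy_set_header Connection "";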