local kube = import "../../../kube/kube.libsonnet";
{
local top = self,
local cfg = top.cfg,
cfg:: {
image: error "cfg.image needs to be set",
        storageClassName: error "cfg.storageClassName needs to be set",
        # webDomain is the domain name at which the Synapse instance will run
webDomain: error "cfg.webDomain must be set",
        # serverName is the server part of the MXIDs this homeserver will cover
        # (e.g. "example.org" in "@user:example.org")
serverName: error "cfg.serverName must be set",
        cas: { enable: false },
        oidc: { enable: false },
        # coturn needs a default so that the cfg.coturn.enable checks below
        # resolve when no TURN integration is configured.
        coturn: { enable: false },
appserviceWorker: false,
federationWorker: false,
macaroonSecretKey: error "cfg.macaroonSecretKey needs to be set",
        registrationSharedSecret: error "cfg.registrationSharedSecret needs to be set",
workerReplicationSecret: error "cfg.workerReplicationSecret needs to be set",
},
ns:: error "ns needs to be provided",
local ns = top.ns,
postgres:: error "postgres needs to be provided",
redis:: error "redis needs to be provided",
// See matrix-ng.libsonnet for description
appservices:: error "appservices need to be provided",
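    // A minimal usage sketch (all values here are hypothetical). Note that only
    // the keys of appservices matter in this file: each key k is expected to
    // have a matching "appservice-<k>-registration" secret, which gets mounted
    // at /appservices/<k>/registration.yaml.
    //
    //   lib {  // where lib is an import of this library
    //       cfg+:: {
    //           image: "registry.example.org/synapse:v1.xx",
    //           storageClassName: "...",
    //           webDomain: "matrix.example.org",
    //           serverName: "example.org",
    //           ...
    //       },
    //       ns: ..., postgres: ..., redis: ...,
    //       appservices: { telegram: {} },
    //   }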
dataVolume: ns.Contain(kube.PersistentVolumeClaim("synapse-data-waw3")) {
storage:: "50Gi",
storageClass:: cfg.storageClassName,
},
    // The homeserver.yaml that will be used to run Synapse (shipped in the configMap below).
    // It is based on //app/matrix/lib/synapse/homeserver.yaml, with some fields overridden
    // per deployment.
config:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
server_name: cfg.serverName,
public_baseurl: "https://%s" % [cfg.webDomain],
signing_key_path: "/secrets/homeserver_signing_key",
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
for k in std.objectFields(top.appservices)
],
notify_appservices: cfg.appserviceWorker == false,
# FIXME(informatic) Rolling out with federationWorkers = true breaks
# *some* federation, needs investigation...
#send_federation: cfg.federationWorker == false,
#federation_sender_instances: if cfg.federationWorker then [
# "%s-%s" % [top.federationSenderWorker.deployment.metadata.name, idx]
        #  for idx in std.range(0, top.federationSenderWorker.deployment.spec.replicas - 1)
#] else [],
} + (if cfg.cas.enable then {
cas_config: {
enabled: true,
server_url: "https://%s/_cas" % [cfg.webDomain],
service_url: "https://%s" % [cfg.webDomain],
},
} else {}) + (if cfg.coturn.enable then {
        turn_uris: [
            "turn:%s?transport=udp" % cfg.coturn.config.domain,
            "turn:%s?transport=tcp" % cfg.coturn.config.domain,
        ],
        # Lifetime of a single set of TURN user credentials: 1 day (in milliseconds),
        # as recommended by the TURN REST spec, see
        # https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00#section-2.2
turn_user_lifetime: 24 * 60 * 60 * 1000,
turn_allow_guests: true,
} else {}),
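    // For example, assuming cfg.coturn.config.domain is "turn.example.org", the
    // coturn block above renders turn_uris as:
    //   ["turn:turn.example.org?transport=udp", "turn:turn.example.org?transport=tcp"]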
configMap: ns.Contain(kube.ConfigMap("synapse")) {
data: {
"homeserver.yaml": std.manifestYamlDoc(top.config),
"log.config": importstr "synapse/log.config",
},
},
    // homeserver-secrets.yaml contains all the templated secret variables from the
    // base homeserver.yaml, passed as a YAML-encoded environment variable.
    // $(ENVVAR) references are resolved by Kubernetes on pod startup.
    secretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] + (if cfg.oidc.enable then {
oidc_config: cfg.oidc.config {
enabled: true,
client_secret: "$(OIDC_CLIENT_SECRET)",
},
} else {}) + (if cfg.coturn.enable then {
turn_shared_secret: "$(TURN_SHARED_SECRET)",
} else {}),
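    // For example, assuming the base homeserver-secrets.yaml contains a line such
    // as:
    //   macaroon_secret_key: "$(SYNAPSE_MACAROON_SECRET_KEY)"
    // the whole document is passed through the X_SECRETS_CONFIG environment
    // variable (see env_ below), Kubernetes expands $(SYNAPSE_MACAROON_SECRET_KEY)
    // from the sibling environment variable, and the container entrypoint writes
    // the result to /tmp/secrets.yaml.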
# Synapse process Deployment/StatefulSet base resource.
SynapseWorker(name, workerType, builder):: ns.Contain(builder(name)) {
local worker = self,
cfg:: {
            # Configuration customization. Values can contain Kubernetes $(ENVVAR)
            # substitution syntax, as used in the worker_name value below.
localConfig: {
worker_app: workerType,
worker_name: "$(POD_NAME)",
# The replication listener on the main synapse process.
worker_replication_host: "synapse-replication-master",
worker_replication_http_port: 9093,
},
            # Mount top.dataVolume at /data.
mountData: false,
resources: {
requests: { cpu: "300m", memory: "2Gi" },
limits: { cpu: "1500m", memory: "2Gi" },
},
},
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(top.configMap),
secrets: { secret: { secretName: "synapse" } },
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
for k in std.objectFields(top.appservices)
} + if worker.cfg.mountData then {
data: kube.PersistentVolumeClaimVolume(top.dataVolume),
} else {},
containers_: {
web: kube.Container("synapse") {
image: cfg.image,
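                            # The entrypoint below materializes the env-provided YAML
                            # layers as files, then execs the selected worker application
                            # with all three configuration layers applied (base, secrets,
                            # per-worker local).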
command: [
"/bin/sh", "-c", |||
set -e
echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
|||
],
resources: worker.cfg.resources,
ports_: {
http: { containerPort: 8008 },
metrics: { containerPort: 9092 },
replication: { containerPort: 9093 },
},
env_: {
SYNAPSE_WORKER: workerType,
SYNAPSE_MACAROON_SECRET_KEY: cfg.macaroonSecretKey,
SYNAPSE_REGISTRATION_SHARED_SECRET: cfg.registrationSharedSecret,
WORKER_REPLICATION_SECRET: cfg.workerReplicationSecret,
POSTGRES_PASSWORD: top.postgres.cfg.password,
POSTGRES_USER: top.postgres.cfg.username,
POSTGRES_DB: top.postgres.cfg.database,
POSTGRES_HOST: top.postgres.cfg.host,
POSTGRES_PORT: top.postgres.cfg.port,
REDIS_PASSWORD: top.redis.cfg.password,
POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
TURN_SHARED_SECRET: if cfg.coturn.enable then cfg.coturn.config.authSecret else "",
X_SECRETS_CONFIG: std.manifestYamlDoc(top.secretsConfig),
X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
},
volumeMounts_: {
                                config: { mountPath: "/conf" },
secrets: { mountPath: "/secrets" },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
for k in std.objectFields(top.appservices)
} + if worker.cfg.mountData then {
data: { mountPath: "/data" },
} else {},
readinessProbe: {
httpGet: {
path: "/health",
port: "http",
},
initialDelaySeconds: 5,
periodSeconds: 10,
},
livenessProbe: {
httpGet: {
path: "/health",
port: "http",
},
initialDelaySeconds: 60,
periodSeconds: 30,
},
},
},
securityContext: {
runAsUser: 991,
runAsGroup: 991,
fsGroup: 991,
},
},
},
},
},
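    # Usage sketch: the builder argument selects the Kubernetes resource kind,
    # as in the concrete workers below, e.g.:
    #   top.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet)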
# Synapse main process
main: {
deployment: top.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
cfg+: {
localConfig: {
                    # Changes to the following configuration values need to
                    # trigger a restart of the master process.
notify_appservices: top.config.notify_appservices,
# send_federation: top.config.send_federation,
# federation_sender_instances: top.config.federation_sender_instances,
},
resources+: {
limits+: { memory: "4Gi" },
requests+: { memory: "2Gi" },
},
},
spec+: {
strategy+: {
rollingUpdate: {
maxSurge: 0,
maxUnavailable: 1,
},
},
},
},
svc: ns.Contain(kube.Service("synapse")) {
target:: top.main.deployment,
},
replicationSvc: ns.Contain(kube.Service("synapse-replication-master")) {
target:: top.main.deployment,
spec+: {
ports: [
                { port: 9093, name: "replication", targetPort: 9093 },
],
},
},
},
genericWorker: {
# Synapse generic worker deployment
deployment: top.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["client", "federation"]}],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
},
svc: ns.Contain(kube.Service("synapse-generic")) {
target:: top.genericWorker.deployment,
},
        # The following paths can be handled by generic workers.
        # See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
pathsList:: |||
# Sync requests
^/_matrix/client/(r0|v3)/sync$
^/_matrix/client/(api/v1|r0|v3)/events$
^/_matrix/client/(api/v1|r0|v3)/initialSync$
^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$
# Federation requests
^/_matrix/federation/v1/event/
^/_matrix/federation/v1/state/
^/_matrix/federation/v1/state_ids/
^/_matrix/federation/v1/backfill/
^/_matrix/federation/v1/get_missing_events/
^/_matrix/federation/v1/publicRooms
^/_matrix/federation/v1/query/
^/_matrix/federation/v1/make_join/
^/_matrix/federation/v1/make_leave/
^/_matrix/federation/(v1|v2)/send_join/
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/timestamp_to_event/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/key/v2/query
^/_matrix/federation/v1/hierarchy/
# Inbound federation transaction request
^/_matrix/federation/v1/send/
# Client API requests
^/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$
^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
^/_matrix/client/(r0|v3|unstable)/devices$
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/v1/rooms/.*/timestamp_to_event$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
^/_matrix/client/(r0|v3|unstable)/room_keys/
^/_matrix/client/(r0|v3|unstable)/keys/upload/
# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
^/_matrix/client/(r0|v3|unstable)/register$
^/_matrix/client/v1/register/m.login.registration_token/validity$
# Event sending requests
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|v3|unstable)/join/
^/_matrix/client/(api/v1|r0|v3|unstable)/knock/
^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
# Account data requests
#^/_matrix/client/(r0|v3|unstable)/.*/tags
#^/_matrix/client/(r0|v3|unstable)/.*/account_data
# Receipts requests
#^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
#^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
# Presence requests
#^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
# User directory search requests
#^/_matrix/client/(r0|v3|unstable)/user_directory/search$
|||,
paths:: std.filterMap(
# Ignore comments and empty lines
function(v) !std.startsWith(v, '#') && std.length(v) > 1,
# Strip leading ^
function(v) std.substr(v, 1, std.length(v) - 1),
std.split(self.pathsList, "\n")
),
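        # For example, the "^/_matrix/client/versions$" entry in pathsList above
        # ends up in paths as "/_matrix/client/versions$": comment and empty
        # lines are dropped, and the leading ^ anchor is stripped.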
},
    # Synapse media worker. This handles access to uploads and media stored in top.dataVolume.
mediaWorker: {
deployment: top.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
cfg+: {
mountData: true,
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["media"]}],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
},
svc: ns.Contain(kube.Service("synapse-media")) {
target:: top.mediaWorker.deployment,
},
},
appserviceWorker: if cfg.appserviceWorker then {
# Worker responsible for sending traffic to registered appservices
deployment: top.SynapseWorker("synapse-appservice", "synapse.app.appservice", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: [] }],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
},
} else null,
federationSenderWorker: if cfg.federationWorker then {
deployment: top.SynapseWorker("synapse-federation-sender", "synapse.app.federation_sender", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: [] }],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
spec+: {
replicas: 2,
},
},
} else null,
}