local kube = import "../../../kube/kube.libsonnet";
{
local app = self,
local cfg = app.cfg,
cfg:: {
image: error "cfg.image needs to be set",
storageClassName: error "cfg.storageClassName needs to be set",
# webDomain is the domain name at which the synapse instance will run
webDomain: error "cfg.webDomain must be set",
# serverName is the server part of the MXID this homeserver will cover
serverName: error "cfg.serverName must be set",
cas: { enable: false },
oidc: { enable: false },
# coturn is referenced below (turn_uris, turn_shared_secret); it defaults
# to disabled so evaluation succeeds when no TURN server is configured.
coturn: { enable: false },
appserviceWorker: false,
federationWorker: false,
macaroonSecretKey: error "cfg.macaroonSecretKey needs to be set",
registrationSharedSecret: error "cfg.registrationSharedSecret needs to be set",
workerReplicationSecret: error "cfg.workerReplicationSecret needs to be set",
},
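// Example instantiation (a sketch only; every value below is a hypothetical
// placeholder, and secrets should come from a secret store, not literals):
//
//   synapse: synapse {
//     cfg+: {
//       image: "registry.example.com/synapse:v1.x",
//       storageClassName: "example-ssd",
//       webDomain: "matrix.example.com",
//       serverName: "example.com",
//       macaroonSecretKey: "...",
//       registrationSharedSecret: "...",
//       workerReplicationSecret: "...",
//     },
//   },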
ns:: error "ns needs to be provided",
postgres:: error "postgres needs to be provided",
redis:: error "redis needs to be provided",
// See matrix-ng.libsonnet for description
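// From its usage below, appservices is assumed to be an object keyed by
// appservice name: each key k yields a Secret volume named
// "appservice-<k>-registration" mounted at /appservices/<k>/registration.yaml.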
appservices:: error "appservices need to be provided",
dataVolume: app.ns.Contain(kube.PersistentVolumeClaim("synapse-data-waw3")) {
spec+: {
storageClassName: cfg.storageClassName,
accessModes: [ "ReadWriteOnce" ],
resources: {
requests: {
storage: "50Gi",
},
},
},
},
// homeserver.yaml that will be used to run synapse (in configMap).
// This is based on //app/matrix/lib/synapse/homeserver.yaml with some fields overridden per
// deployment.
config:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
server_name: cfg.serverName,
public_baseurl: "https://%s" % [cfg.webDomain],
signing_key_path: "/secrets/homeserver_signing_key",
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
for k in std.objectFields(app.appservices)
],
notify_appservices: cfg.appserviceWorker == false,
# FIXME(informatic) Rolling out with federationWorkers = true breaks
# *some* federation, needs investigation...
#send_federation: cfg.federationWorker == false,
#federation_sender_instances: if cfg.federationWorker then [
# "%s-%s" % [app.federationSenderWorker.deployment.metadata.name, idx]
# for idx in std.range(0, app.federationSenderWorker.deployment.spec.replicas)
#] else [],
} + (if cfg.cas.enable then {
cas_config: {
enabled: true,
server_url: "https://%s/_cas" % [cfg.webDomain],
service_url: "https://%s" % [cfg.webDomain],
},
} else {}) + (if cfg.coturn.enable then {
turn_uris: [ "turn:%s?transport=udp" % cfg.coturn.config.domain, "turn:%s?transport=tcp" % cfg.coturn.config.domain ],
# Lifetime of a single TURN user's credentials, in milliseconds - 1 day, as
# recommended by the TURN REST spec, see
# https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00#section-2.2
turn_user_lifetime: 24 * 60 * 60 * 1000,
turn_allow_guests: true,
} else {}),
configMap: app.ns.Contain(kube.ConfigMap("synapse")) {
data: {
"homeserver.yaml": std.manifestYamlDoc(app.config),
"log.config": importstr "synapse/log.config",
},
},
// homeserver-secrets.yaml contains all the templated secret variables from
// the base homeserver.yaml, passed as a YAML-encoded environment variable.
// $(ENVVAR)-style references are resolved by Kubernetes on pod startup.
secretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] {
} + (if cfg.oidc.enable then {
oidc_config: cfg.oidc.config {
enabled: true,
client_secret: "$(OIDC_CLIENT_SECRET)",
},
} else {}) + (if cfg.coturn.enable then {
turn_shared_secret: "$(TURN_SHARED_SECRET)",
} else {}),
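// How the pieces fit together (a sketch of the mechanism, not extra config):
// secretsConfig is rendered to YAML and passed via the X_SECRETS_CONFIG
// environment variable below; Kubernetes expands the $(OIDC_CLIENT_SECRET)
// and $(TURN_SHARED_SECRET) references against the container's other env
// vars, and the entrypoint writes the expanded result to /tmp/secrets.yaml.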
# Synapse process Deployment/StatefulSet base resource.
SynapseWorker(name, workerType, builder):: app.ns.Contain(builder(name)) {
local worker = self,
cfg:: {
# Configuration customization. Values can contain Kubernetes environment
# substitution syntax, as used in the worker_name value below.
localConfig: {
worker_app: workerType,
worker_name: "$(POD_NAME)",
# The replication listener on the main synapse process.
worker_replication_host: "synapse-replication-master",
worker_replication_http_port: 9093,
},
# Mount app.dataVolume in /data
mountData: false,
resources: {
requests: { cpu: "300m", memory: "2Gi" },
limits: { cpu: "1500m", memory: "2Gi" },
},
},
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.configMap),
secrets: { secret: { secretName: "synapse" } },
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: kube.PersistentVolumeClaimVolume(app.dataVolume),
} else {},
containers_: {
web: kube.Container("synapse") {
image: cfg.image,
command: [
"/bin/sh", "-c", |||
set -e
echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
|||
],
resources: worker.cfg.resources,
ports_: {
http: { containerPort: 8008 },
metrics: { containerPort: 9092 },
replication: { containerPort: 9093 },
},
env_: {
SYNAPSE_WORKER: workerType,
SYNAPSE_MACAROON_SECRET_KEY: cfg.macaroonSecretKey,
SYNAPSE_REGISTRATION_SHARED_SECRET: cfg.registrationSharedSecret,
WORKER_REPLICATION_SECRET: cfg.workerReplicationSecret,
POSTGRES_PASSWORD: app.postgres.cfg.password,
POSTGRES_USER: app.postgres.cfg.username,
POSTGRES_DB: app.postgres.cfg.database,
POSTGRES_HOST: app.postgres.cfg.host,
POSTGRES_PORT: app.postgres.cfg.port,
REDIS_PASSWORD: app.redis.cfg.password,
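# Downward API reference: resolves to the pod's own name at runtime,
# giving every worker replica a unique worker_name via $(POD_NAME).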
POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
TURN_SHARED_SECRET: if cfg.coturn.enable then cfg.coturn.config.authSecret else "",
X_SECRETS_CONFIG: std.manifestYamlDoc(app.secretsConfig),
X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
},
volumeMounts_: {
config: { mountPath: "/conf", },
secrets: { mountPath: "/secrets" },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: { mountPath: "/data" },
} else {},
readinessProbe: {
httpGet: {
path: "/health",
port: "http",
},
initialDelaySeconds: 5,
periodSeconds: 10,
},
livenessProbe: {
httpGet: {
path: "/health",
port: "http",
},
initialDelaySeconds: 60,
periodSeconds: 30,
},
},
},
securityContext: {
runAsUser: 991,
runAsGroup: 991,
fsGroup: 991,
},
},
},
},
},
# Synapse main process
main: {
deployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
cfg+: {
localConfig: {
# Changes to the following configuration values must trigger a
# restart of the main process.
notify_appservices: app.config.notify_appservices,
# send_federation: app.config.send_federation,
# federation_sender_instances: app.config.federation_sender_instances,
},
resources+: {
limits+: { memory: "4Gi" },
requests+: { memory: "2Gi" },
},
},
spec+: {
strategy+: {
rollingUpdate: {
maxSurge: 0,
maxUnavailable: 1,
},
},
},
},
svc: app.ns.Contain(kube.Service("synapse")) {
target_pod:: app.main.deployment.spec.template,
},
replicationSvc: app.ns.Contain(kube.Service("synapse-replication-master")) {
target_pod:: app.main.deployment.spec.template,
spec+: {
ports: [
{ port: 9093, name: 'replication', targetPort: 9093 },
],
},
},
},
genericWorker: {
# Synapse generic worker deployment
deployment: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["client", "federation"]}],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
},
svc: app.ns.Contain(kube.Service("synapse-generic")) {
target_pod:: app.genericWorker.deployment.spec.template,
},
# The following paths can be handled by generic workers.
# See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
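# These regexes are exposed for the routing layer (e.g. the reverse proxy
# in front of Synapse, defined outside this file) to direct matching
# requests to the generic worker service instead of the main process.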
paths:: [
"/_matrix/client/(r0|v3)/sync",
"/_matrix/client/(api/v1|r0|v3)/events",
"/_matrix/client/(api/v1|r0|v3)/initialSync",
"/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync",
"/_matrix/federation/v1/event/",
"/_matrix/federation/v1/state/",
"/_matrix/federation/v1/state_ids/",
"/_matrix/federation/v1/backfill/",
"/_matrix/federation/v1/get_missing_events/",
"/_matrix/federation/v1/publicRooms",
"/_matrix/federation/v1/query/",
"/_matrix/federation/v1/make_join/",
"/_matrix/federation/v1/make_leave/",
"/_matrix/federation/(v1|v2)/send_join/",
"/_matrix/federation/(v1|v2)/send_leave/",
"/_matrix/federation/(v1|v2)/invite/",
"/_matrix/federation/v1/event_auth/",
"/_matrix/federation/v1/exchange_third_party_invite/",
"/_matrix/federation/v1/user/devices/",
"/_matrix/federation/v1/get_groups_publicised",
"/_matrix/key/v2/query",
"/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/",
"/_matrix/federation/v1/send/",
"/_matrix/client/(api/v1|r0|v3|unstable)/createRoom",
"/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state",
"/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy",
"/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary",
"/_matrix/client/(r0|v3|unstable)/account/3pid",
"/_matrix/client/(r0|v3|unstable)/devices",
"/_matrix/client/versions",
"/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer",
"/_matrix/client/(r0|v3|unstable)/joined_groups",
"/_matrix/client/(r0|v3|unstable)/publicised_groups",
"/_matrix/client/(r0|v3|unstable)/publicised_groups/",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/",
"/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
"/_matrix/client/(api/v1|r0|v3|unstable)/search",
"/_matrix/client/(r0|v3|unstable)/keys/query",
"/_matrix/client/(r0|v3|unstable)/keys/changes",
"/_matrix/client/(r0|v3|unstable)/keys/claim",
"/_matrix/client/(r0|v3|unstable)/room_keys/",
"/_matrix/client/(api/v1|r0|v3|unstable)/login",
"/_matrix/client/(r0|v3|unstable)/register",
"/_matrix/client/v1/register/m.login.registration_token/validity",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/",
"/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)",
"/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
# These need to be handled by stream writers, which are not yet supported.
# "/_matrix/client/(r0|v3|unstable)/sendToDevice/",
# "/_matrix/client/(r0|v3|unstable)/.*/tags",
# "/_matrix/client/(r0|v3|unstable)/.*/account_data",
# "/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
# "/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
# "/_matrix/client/(api/v1|r0|v3|unstable)/presence/",
],
},
# Synapse media worker. This handles access to uploads and media stored in app.dataVolume
mediaWorker: {
deployment: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
cfg+: {
mountData: true,
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["media"]}],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
},
svc: app.ns.Contain(kube.Service("synapse-media")) {
target_pod:: app.mediaWorker.deployment.spec.template,
},
},
appserviceWorker: if cfg.appserviceWorker then {
# Worker responsible for sending traffic to registered appservices
deployment: app.SynapseWorker("synapse-appservice", "synapse.app.appservice", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: [] }],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
},
} else null,
federationSenderWorker: if cfg.federationWorker then {
deployment: app.SynapseWorker("synapse-federation-sender", "synapse.app.federation_sender", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: [] }],
}, {
port: 9092,
type: "metrics",
bind_address: "0.0.0.0",
}],
},
},
spec+: {
replicas: 2,
},
},
} else null,
}