blob: 08c8153edfe96fc6a8d09e552ee9c398a472776b [file] [log] [blame]
# Matrix server (synapse).
# This needs a secret provisioned, create with:
# ns=matrix
#
# SIGNING_KEY="$(kubectl run -n $ns -i --quiet --restart=Never --rm synapse-generate-config --image=matrixdotorg/synapse:v1.19.2 --env SYNAPSE_SERVER_NAME=dummy --env SYNAPSE_REPORT_STATS=no -o yaml --command -- sh -c '/start.py generate >/dev/null && cat /data/*.signing.key')"
# kubectl -n $ns create secret generic synapse --from-literal=postgres_password=$(pwgen 24 1) --from-literal=macaroon_secret_key=$(pwgen 32 1) --from-literal=registration_shared_secret=$(pwgen 32 1) --from-literal=homeserver_signing_key="$SIGNING_KEY" --from-literal=redis_password=$(pwgen 32 1) --from-literal=worker_replication_secret=$(pwgen 32 1)
# kubectl -n $ns create secret generic oauth2-cas-proxy --from-literal=oauth2_secret=...
#
# When migrating from matrix.libsonnet, instance signing key, redis password
# and worker replication secret need to be added to existing synapse secret:
#
# echo "homeserver_signing_key: $(kubectl -n $ns exec deploy/synapse -- sh -c 'cat /data/*.signing.key' | base64 -w0)"
# echo "redis_password: $(pwgen 32 1 | tr -d '\n' | base64 -w0)"
# echo "worker_replication_secret: $(pwgen 32 1 | tr -d '\n' | base64 -w0)"
# kubectl -n $ns edit secret synapse
# # ...add homeserver_signing_key, redis_password and worker_replication_secret keys
#
# Sequencing appservices is fun. The appservice needs to run first (for
# instance, via a bootstrap job), and on startup it will spit out a
# registration file. This registration file then needs to be fed to synapse -
# this is done via specially named secrets (appservice-X-registration, for X key
# in the appservices object).
#
# For appservice-irc instances, you can use this oneliner magic to get the
# registration YAML from logs.
# kubectl -n matrix create secret generic appservice-irc-freenode-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-irc-freenode-bootstrap | tail -n +4 | sed -r 's/(.*aliases:.*)/ group_id: "+freenode:hackerspace.pl"\n\1/')
#
# For appservice-telegram instances, you can use this oneliner magic:
# kubectl -n matrix create secret generic appservice-telegram-prod-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-telegram-prod-bootstrap | grep -A 100 SNIPSNIP | grep -v SNIPSNIP)
local kube = import "../../../kube/kube.libsonnet";
local postgres = import "../../../kube/postgres.libsonnet";
local redis = import "../../../kube/redis.libsonnet";
{
local app = self,
local cfg = app.cfg,
cfg:: {
namespace: error "cfg.namespace must be set",
# webDomain is the domain name at which element will run
webDomain: error "cfg.webDomain must be set",
# serverName is the server part of the MXID this homeserver will cover
serverName: error "cfg.serverName must be set",
storageClassName: "waw-hdd-redundant-3",
images: {
synapse: "matrixdotorg/synapse:v1.25.0",
riot: "vectorim/riot-web:v1.7.18",
casProxy: "registry.k0.hswaw.net/q3k/oauth2-cas-proxy:0.1.4",
appserviceIRC: "matrixdotorg/matrix-appservice-irc:release-0.17.1",
# That's v0.8.2 - we just don't trust that host to not re-tag images.
appserviceTelegram: "dock.mau.dev/tulir/mautrix-telegram@sha256:9e68eaa80c9e4a75d9a09ec92dc4898b12d48390e01efa4de40ce882a6f7e330",
wellKnown: "registry.k0.hswaw.net/q3k/wellknown:1611960794-adbf560851a46ad0e58b42f0daad7ef19535687c",
},
# OpenID Connect provider configuration.
# Currently only client_secret can be provided as a secretKeyRef.
#
# https://${cfg.webDomain}/_synapse/oidc/callback needs to be set as
# allowed OAuth2/OpenID Connect callback URL
#
# See: https://github.com/matrix-org/synapse/blob/v1.25.0/docs/openid.md
oidc: {
enable: false,
config: {
issuer: error "oidc.config.issuer must be set",
client_id: error "oidc.config.client_id must be set",
client_secret: error "oidc.config.client_secret must be set",
# Set this to true when migrating from existing CAS deployment
allow_existing_users: false,
user_mapping_provider: {
config: {
localpart_template: '{{ user.sub }}',
display_name_template: '{{ user.sub }}',
},
},
# Extra configuration required when migrating from
# oauth2-cas-proxy bound to https://sso.hackerspace.pl
# user_profile_method: "userinfo_endpoint",
# client_auth_method: "client_secret_post",
},
},
# Central Authentication Scheme, a single-sign-on system. Note: this flow is now called 'SSO' in Matrix, we keep this name for legacy reasons.
# Refer to https://matrix.org/docs/spec/client_server/r0.6.1#sso-client-login
cas: {
# whether to enable the CAS proxy (ie. connect to hswaw sso via OAuth)
enable: false,
# generate client ID and secret in with your OAuth2 provider, refer to https://www.oauth.com/oauth2-servers/client-registration/client-id-secret/
oauth2: {
clientID: error "cas.oauth2.clientID must be set",
clientSecret: error "cas.oauth2.clientSecret must be set",
scope: error "cas.oauth2.scope must be set",
authorizeURL: error "cas.oauth2.authorizeURL must be set",
tokenURL: error "cas.oauth2.tokenURL must be set",
userinfoURL: error "cas.oauth2.userinfoURL must be set",
},
},
# Serve /.well-known/matrix configuration endpoints required when using
# cfg.webDomain directly as mxid.
wellKnown: false,
},
metadata(component):: {
namespace: cfg.namespace,
labels: {
"app.kubernetes.io/name": "matrix",
"app.kubernetes.io/managed-by": "kubecfg",
"app.kubernetes.io/component": component,
},
},
namespace: kube.Namespace(cfg.namespace),
postgres3: postgres {
cfg+: {
namespace: cfg.namespace,
appName: "synapse",
database: "synapse",
username: "synapse",
prefix: "waw3-",
password: { secretKeyRef: { name: "synapse", key: "postgres_password" } },
storageClassName: cfg.storageClassName,
storageSize: "100Gi",
initdbArgs: "--encoding='UTF8' --lc-collate='C' --lc-ctype='C'",
},
},
redis: redis {
cfg+: {
namespace: cfg.namespace,
appName: "synapse",
storageClassName: cfg.storageClassName,
password: { secretKeyRef: { name: "synapse", key: "redis_password" } },
},
},
dataVolume: kube.PersistentVolumeClaim("synapse-data-waw3") {
metadata+: app.metadata("synapse-data"),
spec+: {
storageClassName: cfg.storageClassName,
accessModes: [ "ReadWriteOnce" ],
resources: {
requests: {
storage: "50Gi",
},
},
},
},
// homeserver.yaml that will be used to run synapse (in synapseConfigMap).
// This is based off of //app/matrix/lib/synapse/homeserver.yaml with some fields overriden per
// deployment.
synapseConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
server_name: cfg.serverName,
public_baseurl: "https://%s" % [cfg.webDomain],
signing_key_path: "/secrets/homeserver_signing_key",
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
for k in std.objectFields(app.appservices)
],
} + (if cfg.cas.enable then {
cas_config: {
enabled: true,
server_url: "https://%s/_cas" % [cfg.webDomain],
service_url: "https://%s" % [cfg.webDomain],
},
} else {}),
synapseConfigMap: kube.ConfigMap("synapse") {
metadata+: app.metadata("synapse"),
data: {
"homeserver.yaml": std.manifestYamlDoc(app.synapseConfig),
"log.config": importstr "synapse/log.config",
},
},
// homeserver-secrets.yaml contains all the templated secret variables from
// base homeserver.yaml passed as yaml-encoded environment variable.
// $(ENVVAR)-encoded variables are resolved by Kubernetes on pod startup
synapseSecretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] {
} + (if cfg.oidc.enable then {
oidc_config: cfg.oidc.config {
enabled: true,
client_secret: "$(OIDC_CLIENT_SECRET)",
},
} else {}),
cas: if cfg.cas.enable && cfg.oidc.enable then error "cfg.cas.enable and cfg.oidc.enable options are exclusive"
else if cfg.cas.enable then {
deployment: kube.Deployment("oauth2-cas-proxy") {
metadata+: app.metadata("oauth2-cas-proxy"),
spec+: {
replicas: 1,
template+: {
spec+: {
containers_: {
proxy: kube.Container("oauth2-cas-proxy") {
image: cfg.images.casProxy,
ports_: {
http: { containerPort: 5000 },
},
env_: {
BASE_URL: "https://%s" % [cfg.webDomain],
SERVICE_URL: "https://%s" % [cfg.webDomain],
OAUTH2_CLIENT: cfg.cas.oauth2.clientID,
OAUTH2_SECRET: cfg.cas.oauth2.clientSecret,
OAUTH2_SCOPE: cfg.cas.oauth2.scope,
OAUTH2_AUTHORIZE: cfg.cas.oauth2.authorizeURL,
OAUTH2_TOKEN: cfg.cas.oauth2.tokenURL,
OAUTH2_USERINFO: cfg.cas.oauth2.userinfoURL,
},
},
},
},
},
},
},
svc: kube.Service("oauth2-cas-proxy") {
metadata+: app.metadata("oauth2-cas-proxy"),
target_pod:: app.cas.deployment.spec.template,
},
},
# Synapse process Deployment/StatefulSet base resource.
SynapseWorker(name, workerType, builder):: builder(name) {
local worker = self,
cfg:: {
# Configuration customization. Can contain environment substitution
# syntax, as used in worker_name value.
localConfig: {
worker_app: workerType,
worker_name: "$(POD_NAME)",
# The replication listener on the main synapse process.
worker_replication_host: "synapse-replication-master",
worker_replication_http_port: 9093,
},
# Mount app.dataVolume in /data
mountData: false,
},
metadata+: app.metadata(name),
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.synapseConfigMap),
secrets: { secret: { secretName: "synapse" } },
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: kube.PersistentVolumeClaimVolume(app.dataVolume),
} else {},
containers_: {
web: kube.Container("synapse") {
image: cfg.images.synapse,
command: [
"/bin/sh", "-c", |||
set -e
echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
|||
],
ports_: {
http: { containerPort: 8008 },
metrics: { containerPort: 9092 },
replication: { containerPort: 9093 },
},
env_: {
SYNAPSE_WORKER: workerType,
SYNAPSE_MACAROON_SECRET_KEY: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } },
SYNAPSE_REGISTRATION_SHARED_SECRET: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } },
WORKER_REPLICATION_SECRET: { secretKeyRef: { name: "synapse", key: "worker_replication_secret" } },
POSTGRES_PASSWORD: { secretKeyRef: { name: "synapse", key: "postgres_password" } },
REDIS_PASSWORD: { secretKeyRef: { name: "synapse", key: "redis_password" } },
POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
X_SECRETS_CONFIG: std.manifestYamlDoc(app.synapseSecretsConfig),
X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
},
volumeMounts_: {
config: { mountPath: "/conf", },
secrets: { mountPath: "/secrets" },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: { mountPath: "/data" },
} else {},
},
},
securityContext: {
runAsUser: 991,
runAsGroup: 991,
fsGroup: 991,
},
},
},
},
},
# Synapse main process
synapseDeployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
cfg+: {
# Main process doesn't need any configuration customization
localConfig: {}
},
},
synapseSvc: kube.Service("synapse") {
metadata+: app.metadata("synapse"),
target_pod:: app.synapseDeployment.spec.template,
},
synapseReplicationSvc: kube.Service("synapse-replication-master") {
metadata+: app.metadata("synapse-replication-master"),
target_pod:: app.synapseDeployment.spec.template,
spec+: {
ports: [
{ port: 9093, name: 'replication', targetPort: 9093 },
],
},
},
# Synapse generic worker deployment
synapseGenericWorker: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["client", "federation"]}],
}],
},
},
},
synapseGenericSvc: kube.Service("synapse-generic") {
metadata+: app.metadata("synapse-generic"),
target_pod:: app.synapseGenericWorker.spec.template,
},
# Following paths can be handled by generic workers.
# See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
synapseGenericWorkerPaths:: [
"/_matrix/client/(v2_alpha|r0)/sync",
"/_matrix/client/(api/v1|v2_alpha|r0)/events",
"/_matrix/client/(api/v1|r0)/initialSync",
"/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync",
"/_matrix/client/(api/v1|r0|unstable)/publicRooms",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state",
"/_matrix/client/(api/v1|r0|unstable)/account/3pid",
"/_matrix/client/(api/v1|r0|unstable)/keys/query",
"/_matrix/client/(api/v1|r0|unstable)/keys/changes",
"/_matrix/client/versions",
"/_matrix/client/(api/v1|r0|unstable)/voip/turnServer",
"/_matrix/client/(api/v1|r0|unstable)/joined_groups",
"/_matrix/client/(api/v1|r0|unstable)/publicised_groups",
"/_matrix/client/(api/v1|r0|unstable)/publicised_groups/",
# Blocked by https://github.com/matrix-org/synapse/issues/8966
# "/_matrix/client/(api/v1|r0|unstable)/login",
# "/_matrix/client/(r0|unstable)/register",
# "/_matrix/client/(r0|unstable)/auth/.*/fallback/web",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)",
"/_matrix/client/(api/v1|r0|unstable)/join/",
"/_matrix/client/(api/v1|r0|unstable)/profile/",
"/_matrix/federation/v1/event/",
"/_matrix/federation/v1/state/",
"/_matrix/federation/v1/state_ids/",
"/_matrix/federation/v1/backfill/",
"/_matrix/federation/v1/get_missing_events/",
"/_matrix/federation/v1/publicRooms",
"/_matrix/federation/v1/query/",
"/_matrix/federation/v1/make_join/",
"/_matrix/federation/v1/make_leave/",
"/_matrix/federation/v1/send_join/",
"/_matrix/federation/v2/send_join/",
"/_matrix/federation/v1/send_leave/",
"/_matrix/federation/v2/send_leave/",
"/_matrix/federation/v1/invite/",
"/_matrix/federation/v2/invite/",
"/_matrix/federation/v1/query_auth/",
"/_matrix/federation/v1/event_auth/",
"/_matrix/federation/v1/exchange_third_party_invite/",
"/_matrix/federation/v1/user/devices/",
"/_matrix/federation/v1/get_groups_publicised",
"/_matrix/key/v2/query",
"/_matrix/federation/v1/send/",
],
# Synapse media worker. This handles access to uploads and media stored in app.dataVolume
synapseMediaWorker: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
cfg+: {
mountData: true,
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["media"]}],
}],
},
},
},
synapseMediaSvc: kube.Service("synapse-media") {
metadata+: app.metadata("synapse-media"),
target_pod:: app.synapseMediaWorker.spec.template,
},
riotConfig:: {
"default_hs_url": "https://%s" % [cfg.webDomain],
"disable_custom_urls": false,
"disable_guests": false,
"disable_login_language_selector": false,
"disable_3pid_login": true,
"brand": "Riot",
"integrations_ui_url": "https://scalar.vector.im/",
"integrations_rest_url": "https://scalar.vector.im/api",
"integrations_jitsi_widget_url": "https://scalar.vector.im/api/widgets/jitsi.html",
"bug_report_endpoint_url": "https://riot.im/bugreports/submit",
"features": {
"feature_groups": "labs",
"feature_pinning": "labs",
"feature_reactions": "labs"
},
"default_federate": true,
"default_theme": "light",
"roomDirectory": {
"servers": [
cfg.serverName,
]
},
"welcomeUserId": "@riot-bot:matrix.org",
"enable_presence_by_hs_url": {
"https://matrix.org": false
}
},
riotConfigMap: kube.ConfigMap("riot-web-config") {
metadata+: app.metadata("riot-web-config"),
data: {
"config.json": std.manifestJsonEx(app.riotConfig, ""),
// Standard nginx.conf, made to work when running as unprivileged user.
"nginx.conf": importstr "riot-nginx.conf",
},
},
riotDeployment: kube.Deployment("riot-web") {
metadata+: app.metadata("riot-web"),
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.riotConfigMap),
},
containers_: {
web: kube.Container("riot-web") {
image: cfg.images.riot,
ports_: {
http: { containerPort: 8080 },
},
volumeMounts: [
{
name: "config",
mountPath: "/app/config.json",
subPath: "config.json",
},
{
name: "config",
mountPath: "/etc/nginx/nginx.conf",
subPath: "nginx.conf",
},
],
},
},
securityContext: {
// nginx:nginx
runAsUser: 101,
runAsGroup: 101,
},
},
},
},
},
riotSvc: kube.Service("riot-web") {
metadata+: app.metadata("riot-web"),
target_pod:: app.riotDeployment.spec.template,
},
wellKnown: if cfg.wellKnown then {
deployment: kube.Deployment("wellknown") {
metadata+: app.metadata("wellknown"),
spec+: {
replicas: 1,
template+: {
spec+: {
containers_: {
web: kube.Container("wellknown") {
image: cfg.images.wellKnown,
ports_: {
http: { containerPort: 8080 },
},
command: ["/app/matrix/wellknown"],
args: ["-hspki_disable", "-domain", cfg.webDomain],
},
},
securityContext: {
runAsUser: 101,
runAsGroup: 101,
},
},
},
},
},
svc: kube.Service("wellknown") {
metadata+: app.metadata("wellknown"),
target_pod:: app.wellKnown.deployment.spec.template,
},
} else {},
// Any appservice you add here will require an appservice-X-registration
// secret containing a registration.yaml file. Adding something to this
// dictionary will cause Synapse to not start until that secret is
// available - so change things carefully!
// If bootstrapping a new appservice, just keep it out of this dictionary
// until it spits you a registration YAML and you feed that to a secret.
appservices: {},
ingress: kube.Ingress("matrix") {
metadata+: app.metadata("matrix") {
annotations+: {
"kubernetes.io/tls-acme": "true",
"certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
"nginx.ingress.kubernetes.io/proxy-body-size": "0",
"nginx.ingress.kubernetes.io/use-regex": "true",
},
},
spec+: {
tls: [
{
hosts: [cfg.webDomain],
secretName: "synapse-tls",
},
],
rules: [
{
host: cfg.webDomain,
http: {
paths: [
{ path: path, backend: app.synapseGenericSvc.name_port }
for path in app.synapseGenericWorkerPaths
] + [
{ path: "/", backend: app.riotSvc.name_port },
{ path: "/_matrix/media/", backend: app.synapseMediaSvc.name_port },
{ path: "/_matrix/", backend: app.synapseSvc.name_port },
# Used by OpenID Connect login flow
{ path: "/_synapse/", backend: app.synapseSvc.name_port },
] + (if cfg.cas.enable then [
{ path: "/_cas", backend: app.cas.svc.name_port },
] else []) + (if cfg.wellKnown then [
{ path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
] else [])
},
}
],
},
},
}