| # Matrix server (synapse). |
| # This needs a secret provisioned, create with: |
| # ns=matrix |
| # |
| # SIGNING_KEY="$(kubectl run -n $ns -i --quiet --restart=Never --rm synapse-generate-config --image=matrixdotorg/synapse:v1.19.2 --env SYNAPSE_SERVER_NAME=dummy --env SYNAPSE_REPORT_STATS=no -o yaml --command -- sh -c '/start.py generate >/dev/null && cat /data/*.signing.key')" |
| # kubectl -n $ns create secret generic synapse --from-literal=postgres_password=$(pwgen 24 1) --from-literal=macaroon_secret_key=$(pwgen 32 1) --from-literal=registration_shared_secret=$(pwgen 32 1) --from-literal=homeserver_signing_key="$SIGNING_KEY" --from-literal=redis_password=$(pwgen 32 1) --from-literal=worker_replication_secret=$(pwgen 32 1) |
| # kubectl -n $ns create secret generic oauth2-cas-proxy --from-literal=oauth2_secret=... |
| # |
# When migrating from matrix.libsonnet, instance signing key, redis password
| # and worker replication secret need to be added to existing synapse secret: |
| # |
| # echo "homeserver_signing_key: $(kubectl -n $ns exec deploy/synapse -- sh -c 'cat /data/*.signing.key' | base64 -w0)" |
| # echo "redis_password: $(pwgen 32 1 | tr -d '\n' | base64 -w0)" |
| # echo "worker_replication_secret: $(pwgen 32 1 | tr -d '\n' | base64 -w0)" |
| # kubectl -n $ns edit secret synapse |
| # # ...add homeserver_signing_key, redis_password and worker_replication_secret keys |
| # |
| # Additionally some resources need to be explicitly removed due to |
| # label/annotations changes: |
| # kubectl -n $ns delete deployment riot-web oauth2-cas-proxy wellknown synapse |
| # |
| # Some service configuration customization fields have been renamed: |
| # .riotConfig → .riot.config |
| # .synapseConfig → .synapse.config |
| # |
| # When migrating from CAS to OpenID Connect authentication scheme following need |
| # to be ensured: |
| # * https://{homeserver}/_synapse/oidc/callback is added to allowed callback URLs list |
| # * openid scope is enabled for configured client |
| # |
| # In order to deploy matrix-media-repo as a replacement for synapse built-in |
| # media workers the following steps need to be carried out: |
| # |
| # 1. Generate password and bootstrap extra postgres user |
| # pwgen 32 1 > secrets/plain/media-repo-$ns-postgres |
| # echo "create database mediarepo; create user mediarepo with password '$(cat secrets/plain/media-repo-$ns-postgres)'; grant all privileges on database mediarepo to mediarepo;" | kubectl -n $ns exec -it deploy/waw3-postgres psql |
| # secretstore sync secrets |
| # |
| # 2. Fetch Ceph RGW credentials |
| # kubectl get secrets -n ceph-waw3 rook-ceph-object-user-waw-hdd-redundant-3-object-$ns -o json | jq '.data|map_values(@base64d)' > secrets/plain/media-repo-$ns-ceph.json |
| # secretstore sync secrets |
| # |
# 3. Create an appropriate bucket using s3cmd
| # s3cmd --access_key="$(jq -r '.AccessKey' secrets/plain/media-repo-$ns-ceph.json)" --secret_key="$(jq -r '.SecretKey' secrets/plain/media-repo-$ns-ceph.json)" --host=object.ceph-waw3.hswaw.net --host-bucket=object.ceph-waw3.hswaw.net mb s3://media-repo-$ns |
| # |
| # 4. Add relevant configuration overrides in cfg.mediaRepo key for your |
| # deployment configuration file: |
| # |
| # mediaRepo+: { |
| # enable: true, |
| # route: false, |
| # s3+: { |
| # endpoint: std.strReplace((import "secrets/plain/media-repo-$ns-ceph.json").Endpoint, "http://", ""), |
| # accessKey: (import "secrets/plain/media-repo-$ns-ceph.json").AccessKey, |
| # secretKey: (import "secrets/plain/media-repo-$ns-ceph.json").SecretKey, |
| # bucketName: "media-repo-$ns", |
| # region: "eu", |
| # }, |
| # db+: { |
| # password: std.strReplace(importstr "secrets/plain/media-repo-$ns-postgres", "\n", ""), |
| # }, |
| # }, |
| # |
| # 5. Additionally, when migrating from already deployed synapse media worker the |
| # following command needs to be run in order to import existing media files: |
| # kubectl -n $ns exec deploy/media-repo -- import_synapse -baseUrl http://synapse-media:8008 -dbHost waw3-postgres -dbPassword "$(kubectl -n $ns get secret synapse -o json | jq -r '.data.postgres_password | @base64d')" -config /config/config.yaml -serverName 'SERVER_NAME' |
| # |
| # 6. After migrating data over from native synapse media worker storage traffic |
| # can be rerouted to matrix-media-repo by switching cfg.mediaRepo.route flag |
| # to true |
| # |
| # 7. Run import step #5 again to make sure no media were left missing in old |
#    media worker deployment - import operation is idempotent and can be run
| # against a synapse media worker that's not handling user traffic anymore. |
| # |
| # Sequencing appservices is fun. The appservice needs to run first (for |
| # instance, via a bootstrap job), and on startup it will spit out a |
| # registration file. This registration file then needs to be fed to synapse - |
# this is done via specially named secrets (appservice-X-registration, for X key
| # in the appservices object). |
| # |
| # For appservice-irc instances, you can use this oneliner magic to get the |
| # registration YAML from logs. |
| # kubectl -n matrix create secret generic appservice-irc-freenode-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-irc-freenode-bootstrap | tail -n +4 | sed -r 's/(.*aliases:.*)/ group_id: "+freenode:hackerspace.pl"\n\1/') |
| # |
| # For appservice-telegram instances, you can use this oneliner magic: |
| # kubectl -n matrix create secret generic appservice-telegram-prod-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-telegram-prod-bootstrap | grep -A 100 SNIPSNIP | grep -v SNIPSNIP) |
| |
| local kube = import "../../../kube/kube.libsonnet"; |
| local postgres = import "../../../kube/postgres.libsonnet"; |
| local redis = import "../../../kube/redis.libsonnet"; |
| |
| local riot = import "./riot.libsonnet"; |
| local cas = import "./cas.libsonnet"; |
| local wellKnown = import "./wellknown.libsonnet"; |
| local synapse = import "./synapse.libsonnet"; |
| local mediaRepo = import "./media-repo.libsonnet"; |
| |
| { |
| local app = self, |
| local cfg = app.cfg, |
| cfg:: { |
| namespace: error "cfg.namespace must be set", |
| # webDomain is the domain name at which element will run |
| webDomain: error "cfg.webDomain must be set", |
| # serverName is the server part of the MXID this homeserver will cover |
| serverName: error "cfg.serverName must be set", |
| storageClassName: "waw-hdd-redundant-3", |
| |
| images: { |
| synapse: "matrixdotorg/synapse:v1.56.0", |
| riot: "vectorim/element-web:v1.10.10", |
| casProxy: "registry.k0.hswaw.net/q3k/oauth2-cas-proxy:0.1.4", |
| appserviceIRC: "matrixdotorg/matrix-appservice-irc:release-0.34.0", |
| appserviceTelegram: "dock.mau.dev/tulir/mautrix-telegram@sha256:c6e25cb57e1b67027069e8dc2627338df35d156315c004a6f2b34b6aeaa79f77", |
| wellKnown: "registry.k0.hswaw.net/q3k/wellknown:1611960794-adbf560851a46ad0e58b42f0daad7ef19535687c", |
| mediaRepo: "turt2live/matrix-media-repo:v1.2.8", |
| }, |
| |
| # OpenID Connect provider configuration. |
| # Currently only client_secret can be provided as a secretKeyRef. |
| # |
| # https://${cfg.webDomain}/_synapse/oidc/callback needs to be set as |
| # allowed OAuth2/OpenID Connect callback URL |
| # |
| # See: https://github.com/matrix-org/synapse/blob/v1.25.0/docs/openid.md |
| oidc: { |
| enable: false, |
| config: { |
| issuer: error "oidc.config.issuer must be set", |
| client_id: error "oidc.config.client_id must be set", |
| client_secret: error "oidc.config.client_secret must be set", |
| |
| # Set this to true when migrating from existing CAS deployment |
| allow_existing_users: false, |
| user_mapping_provider: { |
| config: { |
| localpart_template: '{{ user.sub }}', |
| display_name_template: '{{ user.sub }}', |
| }, |
| }, |
| |
| # Extra configuration required when migrating from |
| # oauth2-cas-proxy bound to https://sso.hackerspace.pl |
| # user_profile_method: "userinfo_endpoint", |
| # client_auth_method: "client_secret_post", |
| }, |
| }, |
| |
| # Central Authentication Scheme, a single-sign-on system. Note: this flow is now called 'SSO' in Matrix, we keep this name for legacy reasons. |
| # Refer to https://matrix.org/docs/spec/client_server/r0.6.1#sso-client-login |
| cas: { |
| # whether to enable the CAS proxy (ie. connect to hswaw sso via OAuth) |
| enable: false, |
| # generate client ID and secret in with your OAuth2 provider, refer to https://www.oauth.com/oauth2-servers/client-registration/client-id-secret/ |
| oauth2: { |
| clientID: error "cas.oauth2.clientID must be set", |
| clientSecret: error "cas.oauth2.clientSecret must be set", |
| scope: error "cas.oauth2.scope must be set", |
| authorizeURL: error "cas.oauth2.authorizeURL must be set", |
| tokenURL: error "cas.oauth2.tokenURL must be set", |
| userinfoURL: error "cas.oauth2.userinfoURL must be set", |
| }, |
| }, |
| |
| # Serve /.well-known/matrix configuration endpoints required when using |
| # cfg.webDomain directly as mxid. |
| wellKnown: false, |
| |
| # matrix-media-repo S3-based media storage container |
| mediaRepo: { |
| enable: false, |
| |
| # Route /_matrix/media/ endpoints to matrix-media-repo. Set this |
| # to true after migrating media files to matrix-media-repo. |
| route: false, |
| |
| s3: { |
| endpoint: error "mediaRepo.s3.endpoint needs to be set", |
| accessKey: error "mediaRepo.s3.accessKey needs to be set", |
| secretKey: error "mediaRepo.s3.secretKey needs to be set", |
| bucketName: error "mediaRepo.s3.bucketName needs to be set", |
| region: error "mediaRepo.s3.region needs to be set", |
| }, |
| |
| db: { |
| username: "mediarepo", |
| password: error "mediaRepo.db.password needs to be set", |
| database: "mediarepo", |
| host: "waw3-postgres", |
| }, |
| }, |
| |
| # List of administrative users MXIDs (used in matrix-media-repo only) |
| admins: [], |
| }, |
| |
| # DEPRECATED: this needs to be removed in favor of namespace.Contain() in |
| # modules that depend on this (appservices/instance defintions) |
| metadata(component):: { |
| namespace: cfg.namespace, |
| labels: { |
| "app.kubernetes.io/name": "matrix", |
| "app.kubernetes.io/managed-by": "kubecfg", |
| "app.kubernetes.io/component": component, |
| }, |
| }, |
| |
| namespace: kube.Namespace(cfg.namespace), |
| |
| postgres3: postgres { |
| cfg+: { |
| namespace: cfg.namespace, |
| appName: "synapse", |
| database: "synapse", |
| username: "synapse", |
| prefix: "waw3-", |
| password: { secretKeyRef: { name: "synapse", key: "postgres_password" } }, |
| storageClassName: cfg.storageClassName, |
| storageSize: "100Gi", |
| initdbArgs: "--encoding='UTF8' --lc-collate='C' --lc-ctype='C'", |
| |
| opts: { |
| max_connections: "300", |
| shared_buffers: "80MB", |
| }, |
| }, |
| }, |
| |
| redis: redis { |
| cfg+: { |
| namespace: cfg.namespace, |
| appName: "synapse", |
| storageClassName: cfg.storageClassName, |
| password: { secretKeyRef: { name: "synapse", key: "redis_password" } }, |
| persistence: false, |
| }, |
| }, |
| |
| riot: riot { |
| ns: app.namespace, |
| cfg+: { |
| webDomain: cfg.webDomain, |
| serverName: cfg.serverName, |
| image: cfg.images.riot, |
| }, |
| }, |
| |
| cas: if cfg.cas.enable && cfg.oidc.enable then error "cfg.cas.enable and cfg.oidc.enable options are exclusive" |
| else if cfg.cas.enable then cas { |
| ns: app.namespace, |
| cfg+: { |
| image: cfg.images.casProxy, |
| webDomain: cfg.webDomain, |
| oauth2: cfg.cas.oauth2, |
| }, |
| }, |
| |
| wellKnown: if cfg.wellKnown then wellKnown { |
| ns: app.namespace, |
| cfg+: { |
| image: cfg.images.wellKnown, |
| webDomain: cfg.webDomain, |
| }, |
| } else {}, |
| |
| mediaRepo: if cfg.mediaRepo.enable then mediaRepo { |
| ns: app.namespace, |
| cfg+: { |
| image: cfg.images.mediaRepo, |
| |
| homeservers: [ |
| {name: cfg.serverName, csApi: "https://" + cfg.webDomain} |
| ], |
| admins: cfg.admins, |
| |
| s3: cfg.mediaRepo.s3, |
| db: cfg.mediaRepo.db, |
| }, |
| } else {}, |
| |
| synapse: synapse { |
| ns: app.namespace, |
| postgres: app.postgres3, |
| redis: app.redis, |
| appservices: app.appservices, |
| cfg+: app.cfg { |
| image: app.cfg.images.synapse, |
| |
| macaroonSecretKey: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } }, |
| registrationSharedSecret: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } }, |
| workerReplicationSecret: { secretKeyRef: { name: "synapse", key: "worker_replication_secret" } }, |
| }, |
| }, |
| |
| // Any appservice you add here will require an appservice-X-registration |
| // secret containing a registration.yaml file. Adding something to this |
| // dictionary will cause Synapse to not start until that secret is |
| // available - so change things carefully! |
| // If bootstrapping a new appservice, just keep it out of this dictionary |
| // until it spits you a registration YAML and you feed that to a secret. |
| appservices: {}, |
| |
| ingress: app.namespace.Contain(kube.Ingress("matrix")) { |
| metadata+: { |
| annotations+: { |
| "kubernetes.io/tls-acme": "true", |
| "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod", |
| "nginx.ingress.kubernetes.io/proxy-body-size": "0", |
| "nginx.ingress.kubernetes.io/use-regex": "true", |
| }, |
| }, |
| spec+: { |
| tls: [ |
| { |
| hosts: [cfg.webDomain], |
| secretName: "synapse-tls", |
| }, |
| ], |
| rules: [ |
| { |
| host: cfg.webDomain, |
| http: { |
| paths: [ |
| { path: path, backend: app.synapse.genericWorker.svc.name_port } |
| for path in app.synapse.genericWorker.paths |
| ] + [ |
| { path: "/", backend: app.riot.svc.name_port }, |
| { path: "/_matrix/media/", backend: if cfg.mediaRepo.route then app.mediaRepo.svc.name_port else app.synapse.mediaWorker.svc.name_port }, |
| { path: "/_matrix/", backend: app.synapse.main.svc.name_port }, |
| |
| # Used by OpenID Connect login flow |
| { path: "/_synapse/", backend: app.synapse.main.svc.name_port }, |
| ] + (if cfg.cas.enable then [ |
| { path: "/_cas", backend: app.cas.svc.name_port }, |
| ] else []) + (if cfg.wellKnown then [ |
| { path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port }, |
| ] else []) |
| }, |
| } |
| ], |
| }, |
| }, |
| } |