# Matrix server (synapse).
| 2 | # This needs a secret provisioned, create with: |
| 3 | # ns=matrix |
| 4 | # |
| 5 | # SIGNING_KEY="$(kubectl run -n $ns -i --quiet --restart=Never --rm synapse-generate-config --image=matrixdotorg/synapse:v1.19.2 --env SYNAPSE_SERVER_NAME=dummy --env SYNAPSE_REPORT_STATS=no -o yaml --command -- sh -c '/start.py generate >/dev/null && cat /data/*.signing.key')" |
| 6 | # kubectl -n $ns create secret generic synapse --from-literal=postgres_password=$(pwgen 24 1) --from-literal=macaroon_secret_key=$(pwgen 32 1) --from-literal=registration_shared_secret=$(pwgen 32 1) --from-literal=homeserver_signing_key="$SIGNING_KEY" --from-literal=redis_password=$(pwgen 32 1) --from-literal=worker_replication_secret=$(pwgen 32 1) |
| 7 | # kubectl -n $ns create secret generic oauth2-cas-proxy --from-literal=oauth2_secret=... |
| 8 | # |
# When migrating from matrix.libsonnet, instance signing key, redis password
| 10 | # and worker replication secret need to be added to existing synapse secret: |
| 11 | # |
| 12 | # echo "homeserver_signing_key: $(kubectl -n $ns exec deploy/synapse -- sh -c 'cat /data/*.signing.key' | base64 -w0)" |
| 13 | # echo "redis_password: $(pwgen 32 1 | tr -d '\n' | base64 -w0)" |
| 14 | # echo "worker_replication_secret: $(pwgen 32 1 | tr -d '\n' | base64 -w0)" |
| 15 | # kubectl -n $ns edit secret synapse |
| 16 | # # ...add homeserver_signing_key, redis_password and worker_replication_secret keys |
| 17 | # |
| 18 | # Sequencing appservices is fun. The appservice needs to run first (for |
| 19 | # instance, via a bootstrap job), and on startup it will spit out a |
| 20 | # registration file. This registration file then needs to be fed to synapse - |
# this is done via specially named secrets (appservice-X-registration, for X key
| 22 | # in the appservices object). |
| 23 | # |
| 24 | # For appservice-irc instances, you can use this oneliner magic to get the |
| 25 | # registration YAML from logs. |
| 26 | # kubectl -n matrix create secret generic appservice-irc-freenode-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-irc-freenode-bootstrap | tail -n +4 | sed -r 's/(.*aliases:.*)/ group_id: "+freenode:hackerspace.pl"\n\1/') |
| 27 | # |
| 28 | # For appservice-telegram instances, you can use this oneliner magic: |
| 29 | # kubectl -n matrix create secret generic appservice-telegram-prod-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-telegram-prod-bootstrap | grep -A 100 SNIPSNIP | grep -v SNIPSNIP) |
| 30 | |
| 31 | local kube = import "../../../kube/kube.libsonnet"; |
| 32 | local postgres = import "../../../kube/postgres.libsonnet"; |
| 33 | local redis = import "../../../kube/redis.libsonnet"; |
| 34 | |
| 35 | { |
| 36 | local app = self, |
| 37 | local cfg = app.cfg, |
    # Deployment configuration. All error-valued fields must be overridden by
    # the consumer of this library.
    cfg:: {
        # namespace is the Kubernetes namespace all resources are created in.
        namespace: error "cfg.namespace must be set",
        # webDomain is the domain name at which element will run
        webDomain: error "cfg.webDomain must be set",
        # serverName is the server part of the MXID this homeserver will cover
        serverName: error "cfg.serverName must be set",
        # storageClassName is used for all PersistentVolumeClaims created by
        # this library (postgres, redis, synapse media data).
        storageClassName: "waw-hdd-redundant-3",

        # Container images used by this deployment.
        images: {
            synapse: "matrixdotorg/synapse:v1.25.0",
            riot: "vectorim/riot-web:v1.7.18",
            casProxy: "registry.k0.hswaw.net/q3k/oauth2-cas-proxy:0.1.4",
            appserviceIRC: "matrixdotorg/matrix-appservice-irc:release-0.17.1",
            # That's v0.8.2 - we just don't trust that host to not re-tag images.
            appserviceTelegram: "dock.mau.dev/tulir/mautrix-telegram@sha256:9e68eaa80c9e4a75d9a09ec92dc4898b12d48390e01efa4de40ce882a6f7e330",
            wellKnown: "registry.k0.hswaw.net/q3k/wellknown:1611960794-adbf560851a46ad0e58b42f0daad7ef19535687c",
        },

        # OpenID Connect provider configuration.
        # Currently only client_secret can be provided as a secretKeyRef.
        #
        # https://${cfg.webDomain}/_synapse/oidc/callback needs to be set as
        # allowed OAuth2/OpenID Connect callback URL
        #
        # See: https://github.com/matrix-org/synapse/blob/v1.25.0/docs/openid.md
        # Mutually exclusive with cas.enable (enforced by the cas field of
        # this library).
        oidc: {
            enable: false,
            # config is mixed verbatim into synapse's oidc_config (see
            # synapseSecretsConfig), with client_secret replaced by an
            # environment variable substitution at pod startup.
            config: {
                issuer: error "oidc.config.issuer must be set",
                client_id: error "oidc.config.client_id must be set",
                client_secret: error "oidc.config.client_secret must be set",

                # Set this to true when migrating from existing CAS deployment
                allow_existing_users: false,
                user_mapping_provider: {
                    config: {
                        # Both the MXID localpart and the display name are
                        # derived from the OIDC 'sub' claim.
                        localpart_template: '{{ user.sub }}',
                        display_name_template: '{{ user.sub }}',
                    },
                },

                # Extra configuration required when migrating from
                # oauth2-cas-proxy bound to https://sso.hackerspace.pl
                # user_profile_method: "userinfo_endpoint",
                # client_auth_method: "client_secret_post",
            },
        },

        # Central Authentication Scheme, a single-sign-on system. Note: this
        # flow is now called 'SSO' in Matrix, we keep this name for legacy
        # reasons.
        # Refer to https://matrix.org/docs/spec/client_server/r0.6.1#sso-client-login
        cas: {
            # whether to enable the CAS proxy (ie. connect to hswaw sso via OAuth)
            enable: false,
            # generate client ID and secret in with your OAuth2 provider,
            # refer to https://www.oauth.com/oauth2-servers/client-registration/client-id-secret/
            oauth2: {
                clientID: error "cas.oauth2.clientID must be set",
                clientSecret: error "cas.oauth2.clientSecret must be set",
                scope: error "cas.oauth2.scope must be set",
                authorizeURL: error "cas.oauth2.authorizeURL must be set",
                tokenURL: error "cas.oauth2.tokenURL must be set",
                userinfoURL: error "cas.oauth2.userinfoURL must be set",
            },
        },

        # Serve /.well-known/matrix configuration endpoints required when using
        # cfg.webDomain directly as mxid.
        wellKnown: false,
    },
| 106 | |
    # metadata(component) returns common Kubernetes object metadata for this
    # deployment: the configured namespace plus standard app.kubernetes.io
    # labels, with the given component name.
    metadata(component):: {
        namespace: cfg.namespace,
        labels: {
            "app.kubernetes.io/name": "matrix",
            "app.kubernetes.io/managed-by": "kubecfg",
            "app.kubernetes.io/component": component,
        },
    },
| 115 | |
    # Namespace that all other resources of this deployment live in.
    namespace: kube.Namespace(cfg.namespace),
| 117 | |
    # PostgreSQL database for synapse, from //kube/postgres.libsonnet. The
    # password comes from the manually-provisioned 'synapse' secret (see the
    # header of this file).
    postgres3: postgres {
        cfg+: {
            namespace: cfg.namespace,
            appName: "synapse",
            database: "synapse",
            username: "synapse",
            prefix: "waw3-",
            password: { secretKeyRef: { name: "synapse", key: "postgres_password" } },
            storageClassName: cfg.storageClassName,
            storageSize: "100Gi",
            # C-locale, UTF-8 database, per synapse's postgres requirements.
            initdbArgs: "--encoding='UTF8' --lc-collate='C' --lc-ctype='C'",
        },
    },
| 131 | |
    # Redis instance from //kube/redis.libsonnet. Its password is injected
    # into synapse workers via the REDIS_PASSWORD environment variable (see
    # SynapseWorker).
    redis: redis {
        cfg+: {
            namespace: cfg.namespace,
            appName: "synapse",
            storageClassName: cfg.storageClassName,
            password: { secretKeyRef: { name: "synapse", key: "redis_password" } },
        },
    },
| 140 | |
    # Media/uploads volume, mounted at /data into workers that set
    # cfg.mountData (currently only synapseMediaWorker).
    dataVolume: kube.PersistentVolumeClaim("synapse-data-waw3") {
        metadata+: app.metadata("synapse-data"),
        spec+: {
            storageClassName: cfg.storageClassName,
            accessModes: [ "ReadWriteOnce" ],
            resources: {
                requests: {
                    storage: "50Gi",
                },
            },
        },
    },
| 153 | |
    // homeserver.yaml that will be used to run synapse (in synapseConfigMap).
    // This is based off of //app/matrix/lib/synapse/homeserver.yaml with some fields overriden per
    // deployment.
    synapseConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
        server_name: cfg.serverName,
        public_baseurl: "https://%s" % [cfg.webDomain],
        // Mounted from the 'synapse' secret (see SynapseWorker volumes).
        signing_key_path: "/secrets/homeserver_signing_key",
        // One registration file per configured appservice, mounted from the
        // corresponding appservice-X-registration secret.
        app_service_config_files: [
            "/appservices/%s/registration.yaml" % [k]
            for k in std.objectFields(app.appservices)
        ],
    } + (if cfg.cas.enable then {
        // Point synapse's CAS login flow at the oauth2-cas-proxy, which the
        // ingress exposes under /_cas on the web domain.
        cas_config: {
            enabled: true,
            server_url: "https://%s/_cas" % [cfg.webDomain],
            service_url: "https://%s" % [cfg.webDomain],
        },
    } else {}),
| 172 | |
    # Shared, non-secret synapse configuration, mounted at /conf into every
    # synapse worker.
    synapseConfigMap: kube.ConfigMap("synapse") {
        metadata+: app.metadata("synapse"),
        data: {
            # Rendered homeserver.yaml, common to all workers.
            "homeserver.yaml": std.manifestYamlDoc(app.synapseConfig),
            "log.config": importstr "synapse/log.config",
        },
    },
| 180 | |
    // homeserver-secrets.yaml contains all the templated secret variables from
    // base homeserver.yaml passed as yaml-encoded environment variable.
    // $(ENVVAR)-encoded variables are resolved by Kubernetes on pod startup
    // (see the X_SECRETS_CONFIG environment variable in SynapseWorker).
    synapseSecretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] {
    } + (if cfg.oidc.enable then {
        // Mix in the OIDC provider configuration, with the client secret
        // substituted at pod startup from the OIDC_CLIENT_SECRET environment
        // variable.
        oidc_config: cfg.oidc.config {
            enabled: true,
            client_secret: "$(OIDC_CLIENT_SECRET)",
        },
    } else {}),
| 191 | |
    # Legacy SSO: an oauth2-cas-proxy instance bridging synapse's CAS login
    # flow to an OAuth2 provider. Only instantiated when cfg.cas.enable is
    # set; mutually exclusive with cfg.oidc.enable (enforced here).
    cas: if cfg.cas.enable && cfg.oidc.enable then error "cfg.cas.enable and cfg.oidc.enable options are exclusive"
    else if cfg.cas.enable then {
        deployment: kube.Deployment("oauth2-cas-proxy") {
            metadata+: app.metadata("oauth2-cas-proxy"),
            spec+: {
                replicas: 1,
                template+: {
                    spec+: {
                        containers_: {
                            proxy: kube.Container("oauth2-cas-proxy") {
                                image: cfg.images.casProxy,
                                ports_: {
                                    http: { containerPort: 5000 },
                                },
                                env_: {
                                    BASE_URL: "https://%s" % [cfg.webDomain],
                                    SERVICE_URL: "https://%s" % [cfg.webDomain],
                                    OAUTH2_CLIENT: cfg.cas.oauth2.clientID,
                                    OAUTH2_SECRET: cfg.cas.oauth2.clientSecret,
                                    OAUTH2_SCOPE: cfg.cas.oauth2.scope,
                                    OAUTH2_AUTHORIZE: cfg.cas.oauth2.authorizeURL,
                                    OAUTH2_TOKEN: cfg.cas.oauth2.tokenURL,
                                    OAUTH2_USERINFO: cfg.cas.oauth2.userinfoURL,
                                },
                            },
                        },
                    },
                },
            },
        },

        # Reached by synapse via the /_cas ingress path.
        svc: kube.Service("oauth2-cas-proxy") {
            metadata+: app.metadata("oauth2-cas-proxy"),
            target_pod:: app.cas.deployment.spec.template,
        },
    },
| 228 | |
    # Synapse process Deployment/StatefulSet base resource.
    #
    # Used for both the main homeserver process and all specialized workers;
    # they share the same image, ConfigMap and secrets, and differ only in the
    # synapse application (workerType, eg. "synapse.app.generic_worker") and
    # per-worker local configuration. Configuration is layered at startup:
    # /conf/homeserver.yaml (synapseConfigMap), then secrets, then the
    # worker-local config - the latter two rendered from environment variables
    # by the container's shell command.
    #
    # name is the Kubernetes resource/metadata name; builder is the kube
    # resource constructor to use (kube.Deployment or kube.StatefulSet).
    SynapseWorker(name, workerType, builder):: builder(name) {
        local worker = self,
        cfg:: {
            # Configuration customization. Can contain environment substitution
            # syntax, as used in worker_name value.
            localConfig: {
                worker_app: workerType,
                worker_name: "$(POD_NAME)",

                # The replication listener on the main synapse process.
                worker_replication_host: "synapse-replication-master",
                worker_replication_http_port: 9093,
            },

            # Mount app.dataVolume in /data
            mountData: false,
        },

        metadata+: app.metadata(name),
        spec+: {
            replicas: 1,
            template+: {
                spec+: {
                    volumes_: {
                        config: kube.ConfigMapVolume(app.synapseConfigMap),
                        secrets: { secret: { secretName: "synapse" } },
                    } + {
                        # One volume per appservice registration secret.
                        [k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
                        for k in std.objectFields(app.appservices)
                    } + if worker.cfg.mountData then {
                        data: kube.PersistentVolumeClaimVolume(app.dataVolume),
                    } else {},
                    containers_: {
                        web: kube.Container("synapse") {
                            image: cfg.images.synapse,
                            # Write the env-var-carried secret and local config
                            # layers to files, then exec the requested synapse
                            # application with all three config layers.
                            command: [
                                "/bin/sh", "-c", |||
                                    set -e
                                    echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
                                    echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
                                    exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
                                |||
                            ],
                            ports_: {
                                http: { containerPort: 8008 },
                                metrics: { containerPort: 9092 },
                                replication: { containerPort: 9093 },
                            },
                            env_: {
                                SYNAPSE_WORKER: workerType,

                                SYNAPSE_MACAROON_SECRET_KEY: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } },
                                SYNAPSE_REGISTRATION_SHARED_SECRET: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } },
                                WORKER_REPLICATION_SECRET: { secretKeyRef: { name: "synapse", key: "worker_replication_secret" } },
                                POSTGRES_PASSWORD: { secretKeyRef: { name: "synapse", key: "postgres_password" } },
                                REDIS_PASSWORD: { secretKeyRef: { name: "synapse", key: "redis_password" } },
                                # Enables the $(POD_NAME) substitution used by
                                # the default worker_name above.
                                POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
                                OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",

                                # YAML-encoded configuration layers; written to
                                # /tmp by the command above.
                                X_SECRETS_CONFIG: std.manifestYamlDoc(app.synapseSecretsConfig),
                                X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
                            },
                            volumeMounts_: {
                                config: { mountPath: "/conf", },
                                secrets: { mountPath: "/secrets" },
                            } + {
                                [k]: { mountPath: "/appservices/%s" % [k] }
                                for k in std.objectFields(app.appservices)
                            } + if worker.cfg.mountData then {
                                data: { mountPath: "/data" },
                            } else {},
                        },
                    },
                    securityContext: {
                        # NOTE(review): presumably the synapse user in the
                        # upstream image - confirm 991:991 against the image.
                        runAsUser: 991,
                        runAsGroup: 991,
                        fsGroup: 991,
                    },
                },
            },
        },
    },
| 312 | |
    # Synapse main process. Workers reach it via synapseReplicationSvc.
    synapseDeployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
        cfg+: {
            # Main process doesn't need any configuration customization - the
            # SynapseWorker defaults (worker_app/worker_name/replication) are
            # deliberately replaced with an empty object here, not merged.
            localConfig: {}
        },
    },
    # Service for traffic handled by the main synapse process (the catch-all
    # /_matrix/ and /_synapse/ ingress paths).
    synapseSvc: kube.Service("synapse") {
        metadata+: app.metadata("synapse"),
        target_pod:: app.synapseDeployment.spec.template,
    },
| 324 | synapseReplicationSvc: kube.Service("synapse-replication-master") { |
| 325 | metadata+: app.metadata("synapse-replication-master"), |
| 326 | target_pod:: app.synapseDeployment.spec.template, |
| 327 | spec+: { |
| 328 | ports: [ |
| 329 | { port: 9093, name: 'replication', targetPort: 9093 }, |
| 330 | ], |
| 331 | }, |
| 332 | }, |
| 333 | |
    # Synapse generic worker deployment. Handles the client/federation paths
    # listed in synapseGenericWorkerPaths. A StatefulSet, so that each replica
    # gets a distinct, stable pod name (used as worker_name via $(POD_NAME)).
    synapseGenericWorker: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
        cfg+: {
            localConfig+: {
                # Serve client and federation resources over HTTP on port
                # 8008, on all addresses.
                worker_listeners: [{
                    type: "http",
                    port: 8008,
                    x_forwarded: true,
                    bind_addresses: ["::"],
                    resources: [{ names: ["client", "federation"]}],
                }],
            },
        },
    },
    # Service routing the generic-worker ingress paths to the generic workers.
    synapseGenericSvc: kube.Service("synapse-generic") {
        metadata+: app.metadata("synapse-generic"),
        target_pod:: app.synapseGenericWorker.spec.template,
    },
| 352 | |
    # Following paths can be handled by generic workers.
    # See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
    #
    # These are consumed as regex ingress paths (hence the use-regex
    # annotation on the ingress) routed to synapseGenericSvc.
    synapseGenericWorkerPaths:: [
        # Client API.
        "/_matrix/client/(v2_alpha|r0)/sync",
        "/_matrix/client/(api/v1|v2_alpha|r0)/events",
        "/_matrix/client/(api/v1|r0)/initialSync",
        "/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync",
        "/_matrix/client/(api/v1|r0|unstable)/publicRooms",
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members",
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*",
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members",
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state",
        "/_matrix/client/(api/v1|r0|unstable)/account/3pid",
        "/_matrix/client/(api/v1|r0|unstable)/keys/query",
        "/_matrix/client/(api/v1|r0|unstable)/keys/changes",
        "/_matrix/client/versions",
        "/_matrix/client/(api/v1|r0|unstable)/voip/turnServer",
        "/_matrix/client/(api/v1|r0|unstable)/joined_groups",
        "/_matrix/client/(api/v1|r0|unstable)/publicised_groups",
        "/_matrix/client/(api/v1|r0|unstable)/publicised_groups/",
        # Blocked by https://github.com/matrix-org/synapse/issues/8966
        # "/_matrix/client/(api/v1|r0|unstable)/login",
        # "/_matrix/client/(r0|unstable)/register",
        # "/_matrix/client/(r0|unstable)/auth/.*/fallback/web",
        # Event sending.
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/",
        "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)",
        "/_matrix/client/(api/v1|r0|unstable)/join/",
        "/_matrix/client/(api/v1|r0|unstable)/profile/",
        # Federation API.
        "/_matrix/federation/v1/event/",
        "/_matrix/federation/v1/state/",
        "/_matrix/federation/v1/state_ids/",
        "/_matrix/federation/v1/backfill/",
        "/_matrix/federation/v1/get_missing_events/",
        "/_matrix/federation/v1/publicRooms",
        "/_matrix/federation/v1/query/",
        "/_matrix/federation/v1/make_join/",
        "/_matrix/federation/v1/make_leave/",
        "/_matrix/federation/v1/send_join/",
        "/_matrix/federation/v2/send_join/",
        "/_matrix/federation/v1/send_leave/",
        "/_matrix/federation/v2/send_leave/",
        "/_matrix/federation/v1/invite/",
        "/_matrix/federation/v2/invite/",
        "/_matrix/federation/v1/query_auth/",
        "/_matrix/federation/v1/event_auth/",
        "/_matrix/federation/v1/exchange_third_party_invite/",
        "/_matrix/federation/v1/user/devices/",
        "/_matrix/federation/v1/get_groups_publicised",
        "/_matrix/key/v2/query",
        # Inbound federation transaction requests.
        "/_matrix/federation/v1/send/",
    ],
| 405 | |
    # Synapse media worker. This handles access to uploads and media stored in app.dataVolume
    # (mounted at /data), reached via the /_matrix/media/ ingress path.
    synapseMediaWorker: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
        cfg+: {
            # Media lives on the shared data volume.
            mountData: true,
            localConfig+: {
                # Serve the media repository over HTTP on port 8008, on all
                # addresses.
                worker_listeners: [{
                    type: "http",
                    port: 8008,
                    x_forwarded: true,
                    bind_addresses: ["::"],
                    resources: [{ names: ["media"]}],
                }],
            },
        },
    },
    # Service routing /_matrix/media/ traffic to the media worker.
    synapseMediaSvc: kube.Service("synapse-media") {
        metadata+: app.metadata("synapse-media"),
        target_pod:: app.synapseMediaWorker.spec.template,
    },
| 425 | |
| 426 | riotConfig:: { |
| 427 | "default_hs_url": "https://%s" % [cfg.webDomain], |
| 428 | "disable_custom_urls": false, |
| 429 | "disable_guests": false, |
| 430 | "disable_login_language_selector": false, |
| 431 | "disable_3pid_login": true, |
| 432 | "brand": "Riot", |
| 433 | "integrations_ui_url": "https://scalar.vector.im/", |
| 434 | "integrations_rest_url": "https://scalar.vector.im/api", |
| 435 | "integrations_jitsi_widget_url": "https://scalar.vector.im/api/widgets/jitsi.html", |
| 436 | |
| 437 | "bug_report_endpoint_url": "https://riot.im/bugreports/submit", |
| 438 | "features": { |
| 439 | "feature_groups": "labs", |
| 440 | "feature_pinning": "labs", |
| 441 | "feature_reactions": "labs" |
| 442 | }, |
| 443 | "default_federate": true, |
| 444 | "default_theme": "light", |
| 445 | "roomDirectory": { |
| 446 | "servers": [ |
| 447 | cfg.serverName, |
| 448 | ] |
| 449 | }, |
| 450 | "welcomeUserId": "@riot-bot:matrix.org", |
| 451 | "enable_presence_by_hs_url": { |
| 452 | "https://matrix.org": false |
| 453 | } |
| 454 | }, |
| 455 | |
    # Configuration files for the riot-web deployment, mounted into its
    # container via subPath mounts.
    riotConfigMap: kube.ConfigMap("riot-web-config") {
        metadata+: app.metadata("riot-web-config"),
        data: {
            "config.json": std.manifestJsonEx(app.riotConfig, ""),
            // Standard nginx.conf, made to work when running as unprivileged user.
            "nginx.conf": importstr "riot-nginx.conf",
        },
    },
| 464 | |
    # riot-web static web client, served by nginx on port 8080 and exposed at
    # / by the ingress.
    riotDeployment: kube.Deployment("riot-web") {
        metadata+: app.metadata("riot-web"),
        spec+: {
            replicas: 1,
            template+: {
                spec+: {
                    volumes_: {
                        config: kube.ConfigMapVolume(app.riotConfigMap),
                    },
                    containers_: {
                        web: kube.Container("riot-web") {
                            image: cfg.images.riot,
                            ports_: {
                                http: { containerPort: 8080 },
                            },
                            # subPath mounts overlay just these two files,
                            # leaving the rest of the image's directories
                            # intact.
                            volumeMounts: [
                                {
                                    name: "config",
                                    mountPath: "/app/config.json",
                                    subPath: "config.json",
                                },
                                {
                                    name: "config",
                                    mountPath: "/etc/nginx/nginx.conf",
                                    subPath: "nginx.conf",
                                },
                            ],
                        },
                    },
                    securityContext: {
                        // nginx:nginx
                        runAsUser: 101,
                        runAsGroup: 101,
                    },
                },
            },
        },
    },
| 503 | |
    # Service routing / traffic to the riot-web client.
    riotSvc: kube.Service("riot-web") {
        metadata+: app.metadata("riot-web"),
        target_pod:: app.riotDeployment.spec.template,
    },
| 508 | |
    # Optional deployment serving Matrix /.well-known discovery endpoints for
    # cfg.webDomain. Only instantiated when cfg.wellKnown is set; otherwise an
    # empty object.
    wellKnown: if cfg.wellKnown then {
        deployment: kube.Deployment("wellknown") {
            metadata+: app.metadata("wellknown"),
            spec+: {
                replicas: 1,
                template+: {
                    spec+: {
                        containers_: {
                            web: kube.Container("wellknown") {
                                image: cfg.images.wellKnown,
                                ports_: {
                                    http: { containerPort: 8080 },
                                },
                                command: ["/app/matrix/wellknown"],
                                # NOTE(review): -hspki_disable presumably
                                # skips internal-PKI client auth, as this is
                                # served plainly behind the ingress - confirm
                                # against the wellknown binary.
                                args: ["-hspki_disable", "-domain", cfg.webDomain],
                            },
                        },
                        securityContext: {
                            # Unprivileged user.
                            runAsUser: 101,
                            runAsGroup: 101,
                        },
                    },
                },
            },
        },
        # Exposed at /.well-known/matrix by the ingress.
        svc: kube.Service("wellknown") {
            metadata+: app.metadata("wellknown"),
            target_pod:: app.wellKnown.deployment.spec.template,
        },
    } else {},
| 539 | |
    // Any appservice you add here will require an appservice-X-registration
    // secret containing a registration.yaml file. Adding something to this
    // dictionary will cause Synapse to not start until that secret is
    // available - so change things carefully!
    // If bootstrapping a new appservice, just keep it out of this dictionary
    // until it spits you a registration YAML and you feed that to a secret.
    //
    // Only the keys of this object are consumed (via std.objectFields, to
    // name registration secrets, volumes and mounts); the values are
    // currently unused.
    appservices: {},
| 547 | |
    # Single ingress serving everything under cfg.webDomain: generic-worker
    # regex paths first, then media, the riot-web client at /, remaining
    # matrix traffic to the main process, and optionally the CAS proxy and
    # well-known endpoints.
    ingress: kube.Ingress("matrix") {
        metadata+: app.metadata("matrix") {
            annotations+: {
                "kubernetes.io/tls-acme": "true",
                "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
                # "0" disables nginx's request body size limit, needed for
                # large media uploads.
                "nginx.ingress.kubernetes.io/proxy-body-size": "0",
                # Required for the regex paths in synapseGenericWorkerPaths.
                "nginx.ingress.kubernetes.io/use-regex": "true",
            },
        },
        spec+: {
            tls: [
                {
                    hosts: [cfg.webDomain],
                    secretName: "synapse-tls",
                },
            ],
            rules: [
                {
                    host: cfg.webDomain,
                    http: {
                        paths: [
                            { path: path, backend: app.synapseGenericSvc.name_port }
                            for path in app.synapseGenericWorkerPaths
                        ] + [
                            { path: "/", backend: app.riotSvc.name_port },
                            { path: "/_matrix/media/", backend: app.synapseMediaSvc.name_port },
                            { path: "/_matrix/", backend: app.synapseSvc.name_port },

                            # Used by OpenID Connect login flow
                            { path: "/_synapse/", backend: app.synapseSvc.name_port },
                        ] + (if cfg.cas.enable then [
                            { path: "/_cas", backend: app.cas.svc.name_port },
                        ] else []) + (if cfg.wellKnown then [
                            { path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
                        ] else [])
                    },
                }
            ],
        },
    },
| 588 | } |