app/matrix: split matrix-ng into submodules, use kube.Namespace.Contain

Splitting matrix-ng into multiple submodules changes some of the keys
that might have been used to customize the homeserver/riot
configuration.

The migration to kube.Namespace.Contain has also changed the Deployment
selectors (which are immutable), so the affected Deployments have to be
deleted manually first.
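
For example (names are hypothetical; substitute the actual namespace
and the deployments whose selectors changed):

    kubectl -n matrix delete deployment synapse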

This is, as always, documented in the lib/matrix-ng.libsonnet header.
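
For reference, the new synapse submodule's dependency injection surface
looks roughly like this (a sketch with hypothetical values; the
authoritative interface and migration notes are in the
lib/matrix-ng.libsonnet header):

    local kube = import "kube/kube.libsonnet";

    (import "app/matrix/lib/synapse.libsonnet") {
        ns: kube.Namespace("matrix"),
        postgres: { cfg: { password: "..." } },  // normally the postgres submodule
        redis: { cfg: { password: "..." } },     // normally the redis submodule
        appservices: {},
        cfg+: {
            image: "matrixdotorg/synapse:v1.25.0",
            storageClassName: "example-storage-class",
            webDomain: "matrix.example.org",
            serverName: "example.org",
            macaroonSecretKey: "...",
            registrationSharedSecret: "...",
            workerReplicationSecret: "...",
        },
    }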

Change-Id: I39a745ee27e3c55ec748818b9cf9b4e8ba1d2df5
diff --git a/app/matrix/lib/synapse.libsonnet b/app/matrix/lib/synapse.libsonnet
new file mode 100644
index 0000000..ea7bff2
--- /dev/null
+++ b/app/matrix/lib/synapse.libsonnet
@@ -0,0 +1,284 @@
+local kube = import "../../../kube/kube.libsonnet";
+
+{
+    local app = self,
+    local cfg = app.cfg,
+    cfg:: {
+        image: error "cfg.image needs to be set",
+        storageClassName: error "cfg.storageClassName needs to be set",
+
+        # webDomain is the domain name at which the synapse instance will run
+        webDomain: error "cfg.webDomain must be set",
+        # serverName is the server part of the MXID this homeserver will cover
+        serverName: error "cfg.serverName must be set",
+
+        cas: { enable: false },
+        oidc: { enable: false },
+
+        macaroonSecretKey: error "cfg.macaroonSecretKey needs to be set",
+        registrationSharedSecret: error "cfg.registrationSharedSecret needs to be set",
+        workerReplicationSecret: error "cfg.workerReplicationSecret needs to be set",
+    },
+
+    ns:: error "ns needs to be provided",
+    postgres:: error "postgres needs to be provided",
+    redis:: error "redis needs to be provided",
+
+    // See matrix-ng.libsonnet for description
+    appservices:: error "appservices need to be provided",
+
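+    // dataVolume is the PVC backing the media store; it is mounted at /data
+    // by workers that set cfg.mountData (currently the media worker).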
+    dataVolume: app.ns.Contain(kube.PersistentVolumeClaim("synapse-data-waw3")) {
+        spec+: {
+            storageClassName: cfg.storageClassName,
+            accessModes: [ "ReadWriteOnce" ],
+            resources: {
+                requests: {
+                    storage: "50Gi",
+                },
+            },
+        },
+    },
+
+    // homeserver.yaml that will be used to run synapse (via a ConfigMap).
+    // This is based on //app/matrix/lib/synapse/homeserver-ng.yaml, with some
+    // fields overridden per deployment.
+    config:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
+        server_name: cfg.serverName,
+        public_baseurl: "https://%s" % [cfg.webDomain],
+        signing_key_path: "/secrets/homeserver_signing_key",
+        app_service_config_files: [
+            "/appservices/%s/registration.yaml" % [k]
+            for k in std.objectFields(app.appservices)
+        ],
+    } + (if cfg.cas.enable then {
+        cas_config: {
+            enabled: true,
+            server_url: "https://%s/_cas" % [cfg.webDomain],
+            service_url: "https://%s" % [cfg.webDomain],
+        },
+    } else {}),
+
+    configMap: app.ns.Contain(kube.ConfigMap("synapse")) {
+        data: {
+            "homeserver.yaml": std.manifestYamlDoc(app.config),
+            "log.config": importstr "synapse/log.config",
+        },
+    },
+
+    // homeserver-secrets.yaml contains all the templated secret values from
+    // the base homeserver.yaml, passed as a YAML-encoded environment variable.
+    // $(ENVVAR) references are resolved by Kubernetes on pod startup
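+    // (eg. an entry like registration_shared_secret: "$(SYNAPSE_REGISTRATION_SHARED_SECRET)").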
+    secretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] {
+    } + (if cfg.oidc.enable then {
+        oidc_config: cfg.oidc.config {
+            enabled: true,
+            client_secret: "$(OIDC_CLIENT_SECRET)",
+        },
+    } else {}),
+
+    # Synapse process Deployment/StatefulSet base resource.
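+    # builder is the kube resource constructor to use (kube.Deployment or
+    # kube.StatefulSet); workerType is the synapse worker application to run,
+    # eg. synapse.app.generic_worker.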
+    SynapseWorker(name, workerType, builder):: app.ns.Contain(builder(name)) {
+        local worker = self,
+        cfg:: {
+            # Configuration customization. Can contain environment substitution
+            # syntax, as used in worker_name value.
+            localConfig: {
+                worker_app: workerType,
+                worker_name: "$(POD_NAME)",
+
+                # The replication listener on the main synapse process.
+                worker_replication_host: "synapse-replication-master",
+                worker_replication_http_port: 9093,
+            },
+
+            # Mount app.dataVolume in /data
+            mountData: false,
+        },
+
+        spec+: {
+            replicas: 1,
+            template+: {
+                spec+: {
+                    volumes_: {
+                        config: kube.ConfigMapVolume(app.configMap),
+                        secrets: { secret: { secretName: "synapse" } },
+                    } + {
+                        [k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
+                        for k in std.objectFields(app.appservices)
+                    } + if worker.cfg.mountData then {
+                        data: kube.PersistentVolumeClaimVolume(app.dataVolume),
+                    } else {},
+                    containers_: {
+                        web: kube.Container("synapse") {
+                            image: cfg.image,
+                            command: [
+                                "/bin/sh", "-c", |||
+                                    set -e
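+                                    # Write env-passed config fragments to disk for synapse to pick up.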
+                                    echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
+                                    echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
+                                    exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
+                                |||
+                            ],
+                            ports_: {
+                                http: { containerPort: 8008 },
+                                metrics: { containerPort: 9092 },
+                                replication: { containerPort: 9093 },
+                            },
+                            env_: {
+                                SYNAPSE_WORKER: workerType,
+
+                                SYNAPSE_MACAROON_SECRET_KEY: cfg.macaroonSecretKey,
+                                SYNAPSE_REGISTRATION_SHARED_SECRET: cfg.registrationSharedSecret,
+                                WORKER_REPLICATION_SECRET: cfg.workerReplicationSecret,
+                                POSTGRES_PASSWORD: app.postgres.cfg.password,
+                                REDIS_PASSWORD: app.redis.cfg.password,
+                                POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
+                                OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
+
+                                X_SECRETS_CONFIG: std.manifestYamlDoc(app.secretsConfig),
+                                X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
+                            },
+                            volumeMounts_: {
+                                config: { mountPath: "/conf", },
+                                secrets: { mountPath: "/secrets" },
+                            } + {
+                                [k]: { mountPath: "/appservices/%s" % [k] }
+                                for k in std.objectFields(app.appservices)
+                            } + if worker.cfg.mountData then {
+                                data: { mountPath: "/data" },
+                            } else {},
+                        },
+                    },
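+                    // Run as the synapse user; 991 is assumed to match the user in cfg.image.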
+                    securityContext: {
+                        runAsUser: 991,
+                        runAsGroup: 991,
+                        fsGroup: 991,
+                    },
+                },
+            },
+        },
+    },
+
+    # Synapse main process
+    main: {
+        deployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
+            cfg+: {
+                # Main process doesn't need any configuration customization
+                localConfig: {}
+            },
+        },
+        svc: app.ns.Contain(kube.Service("synapse")) {
+            target_pod:: app.main.deployment.spec.template,
+        },
+        replicationSvc: app.ns.Contain(kube.Service("synapse-replication-master")) {
+            target_pod:: app.main.deployment.spec.template,
+            spec+: {
+                ports: [
+                    { port: 9093, name: "replication", targetPort: 9093 },
+                ],
+            },
+        },
+    },
+
+    genericWorker: {
+        # Synapse generic worker deployment
+        deployment: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
+            cfg+: {
+                localConfig+: {
+                    worker_listeners: [{
+                        type: "http",
+                        port: 8008,
+                        x_forwarded: true,
+                        bind_addresses: ["::"],
+                        resources: [{ names: ["client", "federation"]}],
+                    }],
+                },
+            },
+        },
+        svc: app.ns.Contain(kube.Service("synapse-generic")) {
+            target_pod:: app.genericWorker.deployment.spec.template,
+        },
+
+        # Following paths can be handled by generic workers.
+        # See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
+        paths:: [
+            "/_matrix/client/(v2_alpha|r0)/sync",
+            "/_matrix/client/(api/v1|v2_alpha|r0)/events",
+            "/_matrix/client/(api/v1|r0)/initialSync",
+            "/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync",
+            "/_matrix/client/(api/v1|r0|unstable)/publicRooms",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state",
+            "/_matrix/client/(api/v1|r0|unstable)/account/3pid",
+            "/_matrix/client/(api/v1|r0|unstable)/keys/query",
+            "/_matrix/client/(api/v1|r0|unstable)/keys/changes",
+            "/_matrix/client/versions",
+            "/_matrix/client/(api/v1|r0|unstable)/voip/turnServer",
+            "/_matrix/client/(api/v1|r0|unstable)/joined_groups",
+            "/_matrix/client/(api/v1|r0|unstable)/publicised_groups",
+            "/_matrix/client/(api/v1|r0|unstable)/publicised_groups/",
+            # Blocked by https://github.com/matrix-org/synapse/issues/8966
+            # "/_matrix/client/(api/v1|r0|unstable)/login",
+            # "/_matrix/client/(r0|unstable)/register",
+            # "/_matrix/client/(r0|unstable)/auth/.*/fallback/web",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/",
+            "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)",
+            "/_matrix/client/(api/v1|r0|unstable)/join/",
+            "/_matrix/client/(api/v1|r0|unstable)/profile/",
+            "/_matrix/federation/v1/event/",
+            "/_matrix/federation/v1/state/",
+            "/_matrix/federation/v1/state_ids/",
+            "/_matrix/federation/v1/backfill/",
+            "/_matrix/federation/v1/get_missing_events/",
+            "/_matrix/federation/v1/publicRooms",
+            "/_matrix/federation/v1/query/",
+            "/_matrix/federation/v1/make_join/",
+            "/_matrix/federation/v1/make_leave/",
+            "/_matrix/federation/v1/send_join/",
+            "/_matrix/federation/v2/send_join/",
+            "/_matrix/federation/v1/send_leave/",
+            "/_matrix/federation/v2/send_leave/",
+            "/_matrix/federation/v1/invite/",
+            "/_matrix/federation/v2/invite/",
+            "/_matrix/federation/v1/query_auth/",
+            "/_matrix/federation/v1/event_auth/",
+            "/_matrix/federation/v1/exchange_third_party_invite/",
+            "/_matrix/federation/v1/user/devices/",
+            "/_matrix/federation/v1/get_groups_publicised",
+            "/_matrix/key/v2/query",
+            "/_matrix/federation/v1/send/",
+        ],
+    },
+
+    # Synapse media worker. This handles access to uploads and media stored in app.dataVolume
+    mediaWorker: {
+        deployment: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
+            cfg+: {
+                mountData: true,
+                localConfig+: {
+                    worker_listeners: [{
+                        type: "http",
+                        port: 8008,
+                        x_forwarded: true,
+                        bind_addresses: ["::"],
+                        resources: [{ names: ["media"]}],
+                    }],
+                },
+            },
+        },
+        svc: app.ns.Contain(kube.Service("synapse-media")) {
+            target_pod:: app.mediaWorker.deployment.spec.template,
+        },
+    },
+}