devtools: add sourcegraph

Deploy Sourcegraph (sourcegraph/server 3.17.1) in the devtools environment,
serving code search at cs.hackerspace.pl. Configuration and user accounts are
managed within Sourcegraph itself. To work around the lack of a public view in
the free self-hosted version, the ingress sets X-Forwarded-User to an
'Anonymous' account; mirko gains setHeaders and extraPaths support on public
HTTP ports to make this possible.

Change-Id: Ic3c40768c761e598e0f42b17a4b9f0d4ebcb2bb2
diff --git a/devtools/kube/prod.jsonnet b/devtools/kube/prod.jsonnet
index add1899..3bdccd2 100644
--- a/devtools/kube/prod.jsonnet
+++ b/devtools/kube/prod.jsonnet
@@ -1,7 +1,9 @@
 local mirko = import "../../kube/mirko.libsonnet";
+local policies = import "../../kube/policies.libsonnet";
 
 local depotview = import "depotview.libsonnet";
 local hackdoc = import "hackdoc.libsonnet";
+local sourcegraph = import "sourcegraph.libsonnet";
 
 {
     devtools(name):: mirko.Environment(name) {
@@ -13,14 +15,23 @@
             hackdoc: hackdoc.cfg {
                 publicFQDN: "hackdoc.hackerspace.pl",
             },
+            sourcegraph: sourcegraph.cfg {
+                publicFQDN: "cs.hackerspace.pl",
+            },
         },
 
         components: {
             depotview: depotview.component(cfg.depotview, env),
             hackdoc: hackdoc.component(cfg.hackdoc, env),
+            // Sourcegraph is configured manually through its web interface; q3k has an
+            // account and can create more administrative ones if needed.
+            sourcegraph: sourcegraph.component(cfg.sourcegraph, env),
         },
     },
 
     prod: self.devtools("devtools-prod") {
+        local env = self,
+        // Needed because of Sourcegraph's tini-based all-in-one container, which runs as root.
+        policy: policies.AllowNamespaceMostlySecure(env.cfg.namespace),
     },
 }
diff --git a/devtools/kube/sourcegraph.libsonnet b/devtools/kube/sourcegraph.libsonnet
new file mode 100644
index 0000000..9e2454d
--- /dev/null
+++ b/devtools/kube/sourcegraph.libsonnet
@@ -0,0 +1,101 @@
+local mirko = import "../../kube/mirko.libsonnet";
+local kube = import "../../kube/kube.libsonnet";
+
+// Deploys Sourcegraph, a code search tool. Its configuration is fully managed
+// within Sourcegraph itself, including user accounts.
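+//
+// Usage sketch (as wired up in devtools/kube/prod.jsonnet in this change), inside
+// an environment's cfg and components blocks respectively:
+//
+//     sourcegraph: sourcegraph.cfg { publicFQDN: "cs.hackerspace.pl" },
+//     sourcegraph: sourcegraph.component(cfg.sourcegraph, env),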
+
+{
+    cfg:: {
+        image: "sourcegraph/server:3.17.1",
+        publicFQDN: error "public FQDN must be set",
+        storageClassName: "waw-hdd-redundant-3",
+    },
+
+    component(cfg, env):: mirko.Component(env, "sourcegraph") {
+        local sourcegraph = self,
+        cfg+: {
+            image: cfg.image,
+            volumes+: {
+                data: kube.PersistentVolumeClaimVolume(sourcegraph.pvc.data),
+                etc: kube.PersistentVolumeClaimVolume(sourcegraph.pvc.etc),
+            },
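+            // The all-in-one image expects to start as root; fsGroup 70 presumably
+            // matches the postgres group of the bundled (Alpine-based) database so it
+            // can write to the data volume.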
+            securityContext: {
+                runAsUser: 0,
+                fsGroup: 70,
+            },
+            container: sourcegraph.Container("main") {
+                volumeMounts_+: {
+                    data: { mountPath: "/var/opt/sourcegraph" },
+                    etc: { mountPath: "/etc/sourcegraph" },
+                },
+                resources: {
+                    requests: {
+                        cpu: "100m",
+                        memory: "1Gi",
+                    },
+                    limits: {
+                        cpu: "1",
+                        memory: "2Gi",
+                    },
+                },
+            },
+            ports+: {
+                publicHTTP: {
+                    public: {
+                        port: 7080,
+                        dns: cfg.publicFQDN,
+                        // Authenticate as the 'Anonymous' user by default. This works in
+                        // tandem with Sourcegraph's authenticate-by-HTTP-header feature, and
+                        // is a workaround for the lack of a public view in the self-hosted
+                        // free version of Sourcegraph.
+                        // https://twitter.com/sqs/status/1272659451292422144
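+                        // The matching Sourcegraph site configuration (managed in the web
+                        // UI, not in this repository) is assumed to look roughly like:
+                        //   "auth.providers": [
+                        //     { "type": "http-header", "usernameHeader": "X-Forwarded-User" }
+                        //   ]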
+                        setHeaders: ["X-Forwarded-User Anonymous"],
+                    },
+                },
+            },
+            extraPaths: [
+                {
+                    // Route the anonymous user's settings page to a service with no
+                    // endpoints, so these requests fail instead of reaching Sourcegraph.
+                    path: "/users/Anonymous/settings",
+                    backend: { serviceName: sourcegraph.blocksvc.metadata.name, servicePort: 2137 },
+                },
+            ],
+        },
+
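+        // A Service with a null selector and thus no endpoints; the ingress path above
+        // points at it purely so that requests go nowhere.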
+        blocksvc: kube.Service(sourcegraph.makeName("blocksvc")) {
+            metadata+: sourcegraph.metadata,
+            spec+: {
+                selector: null,
+                ports: [{ port: 2137, targetPort: 2137 }],
+            },
+        },
+
+        pvc: {
+            data: kube.PersistentVolumeClaim(sourcegraph.makeName("data")) {
+                metadata+: sourcegraph.metadata,
+                spec+: {
+                    storageClassName: cfg.storageClassName,
+                    accessModes: [ "ReadWriteOnce" ],
+                    resources: {
+                        requests: {
+                            storage: "40Gi",
+                        },
+                    },
+                },
+            },
+            etc: kube.PersistentVolumeClaim(sourcegraph.makeName("etc")) {
+                metadata+: sourcegraph.metadata,
+                spec+: {
+                    storageClassName: cfg.storageClassName,
+                    accessModes: [ "ReadWriteOnce" ],
+                    resources: {
+                        requests: {
+                            storage: "4Gi",
+                        },
+                    },
+                },
+            },
+        },
+    }
+}
diff --git a/kube/mirko.libsonnet b/kube/mirko.libsonnet
index b35833f..6703381 100644
--- a/kube/mirko.libsonnet
+++ b/kube/mirko.libsonnet
@@ -31,6 +31,12 @@
                     service: component.svc,
                     port: component.cfg.ports.publicHTTP[p].port,
                     dns: component.cfg.ports.publicHTTP[p].dns,
+                    // Extra request headers to set, as "Name value" strings.
+                    // BUG(q3k): these headers are applied to all components in the environment!
+                    // We should be splitting up ingresses where necessary to combat this.
+                    setHeaders: [],
+                    // Extra paths to add to ingress. These are bare HTTPIngressPaths.
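+                    // e.g. { path: "/foo", backend: { serviceName: "foo", servicePort: 8080 } }.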
+                    extraPaths: component.cfg.extraPaths,
                 }
                 for p in std.objectFields(env.components[c].cfg.ports.publicHTTP)
             ]
@@ -50,6 +56,9 @@
                 annotations+: {
                     "kubernetes.io/tls-acme": "true",
                     "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+                    [if env.ingressServerSnippet != null then "nginx.ingress.kubernetes.io/server-snippet"]: env.ingressServerSnippet,
+                    [if std.length(env.extraHeaders) > 0 then "nginx.ingress.kubernetes.io/configuration-snippet"]:
+                        std.join("\n", ["proxy_set_header %s;" % [h] for h in env.extraHeaders]),
                 },
             },
             spec+: {
@@ -65,13 +74,27 @@
                         http: {
                             paths: [
                                 { path: "/", backend: { serviceName: p.service.metadata.name, servicePort: p.port }},
-                            ],
+                            ] + p.extraPaths,
                         },
                     }
                     for p in env.publicHTTPPorts
                 ],
             },
-        } else {}
+        } else {},
+
+        // Nginx Ingress Controller server configuration snippet to add.
+        ingressServerSnippet:: null,
+
+        // Extra request headers to set on public ingresses, collected from each component's per-port setHeaders.
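+        // For example, setHeaders: ["X-Forwarded-User Anonymous"] is rendered into the
+        // configuration-snippet annotation as "proxy_set_header X-Forwarded-User Anonymous;".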
+        extraHeaders:: std.flattenArrays([
+            std.flattenArrays([
+
+                local portc = env.components[c].cfg.ports.publicHTTP[p];
+                if std.objectHas(portc, "setHeaders") then portc.setHeaders else []
+                for p in std.objectFields(env.components[c].cfg.ports.publicHTTP)
+            ])
+            for c in std.objectFields(env.components)
+        ]),
     },
 
     Component(env, name): {
@@ -110,7 +133,7 @@
                 publicHTTP: {}, // name -> { port: no, dns: fqdn }
                 grpc: { main: 4200 }, // name -> port no
             },
-
+            extraPaths:: [],
         },
 
         allPorts:: {