cluster: fix metallb, add nginx ingress controller
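
metallb: add the controller Deployment, speaker DaemonSet and the
address-pool ConfigMap to lib/metallb.libsonnet; container image tags
are now derived from cfg.version.

nginx: new lib/nginx.libsonnet deploying a per-cluster ingress
controller (RBAC, empty nginx-configuration/tcp-services/udp-services
ConfigMaps, a LoadBalancer Service and the controller Deployment),
instantiated from cluster.jsonnet.

As a minimal sketch (not part of this change), a cluster definition
could pin the MetalLB image tag instead of tracking master by
overriding cfg when instantiating the environment; "v0.7.3" is only an
example tag:

    metallb: metallb.Environment {
        cfg+:: {
            version:: "v0.7.3",  // example tag only
        },
    },
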
diff --git a/cluster/kube/cluster.jsonnet b/cluster/kube/cluster.jsonnet
index c79a827..800e47a 100644
--- a/cluster/kube/cluster.jsonnet
+++ b/cluster/kube/cluster.jsonnet
@@ -5,6 +5,7 @@
 local metrics = import "lib/metrics.libsonnet";
 local calico = import "lib/calico.libsonnet";
 local metallb = import "lib/metallb.libsonnet";
+local nginx = import "lib/nginx.libsonnet";
 
 local Cluster(fqdn) = {
     local cluster = self,
@@ -51,6 +52,8 @@
     metrics: metrics.Environment {},
     // Metal Load Balancer
     metallb: metallb.Environment {},
+    // Main nginx Ingress Controller
+    nginx: nginx.Environment {},
 };
 
 
diff --git a/cluster/kube/lib/metallb.libsonnet b/cluster/kube/lib/metallb.libsonnet
index a00163b..fd682b7 100644
--- a/cluster/kube/lib/metallb.libsonnet
+++ b/cluster/kube/lib/metallb.libsonnet
@@ -24,6 +24,9 @@
         cfg:: {
             namespace: "metallb-system",
             namespaceCreate: true,
+            version:: "master",
+            imageController: "metallb/controller:" + cfg.version,
+            imageSpeaker: "metallb/speaker:" + cfg.version,
         },
 
         ns: if cfg.namespaceCreate then kube.Namespace(cfg.namespace),
@@ -106,5 +109,96 @@
                 name: env.roleWatcher.metadata.name,
             },
         },
+
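+        # MetalLB controller: watches Services and assigns LoadBalancer addresses from the pools configured below.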
+        deployController: kube.Deployment("controller") {
+            metadata+: {
+                namespace: cfg.namespace,
+            },
+            spec+: {
+                revisionHistoryLimit: 3,
+                template+: {
+                    spec+: {
+                        serviceAccountName: env.saController.metadata.name,
+                        terminationGracePeriodSeconds: 0,
+                        securityContext: {
+                            runAsNonRoot: true,
+                            runAsUser: 65534, # nobody
+                        },
+                        containers_: {
+                            controller: kube.Container("controller") {
+                                image: cfg.imageController,
+                                args: [ "--port=7472", "--config=config" ],
+                                ports: [
+                                    { name: "monitoring", containerPort: 7472 },
+                                ],
+                                resources: {
+                                    limits: { cpu: "100m", memory: "100Mi" },
+                                },
+                                securityContext: {
+                                    allowPrivilegeEscalation: false,
+                                    capabilities: { drop: [ "all" ] },
+                                    readOnlyRootFilesystem: true,
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+
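+        # MetalLB speaker: runs on every node (host network) and announces assigned addresses; layer2 mode answers ARP, hence the net_raw capability.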
+        daemonsetSpeaker: kube.DaemonSet("speaker") {
+            metadata+: {
+                namespace: cfg.namespace,
+            },
+            spec+: {
+                template+: {
+                    spec+: {
+                        serviceAccountName: env.saSpeaker.metadata.name,
+                        hostNetwork: true,
+                        containers_: {
+                            speaker: kube.Container("speaker") {
+                                image: cfg.imageSpeaker,
+                                args: [ "--port=7472", "--config=config" ],
+                                env_: {
+                                    METALLB_NODE_NAME: kube.FieldRef("spec.nodeName"),
+                                },
+                                ports: [
+                                    { name: "monitoring", containerPort: 7472 },
+                                ],
+                                resources: {
+                                    limits: { cpu: "100m", memory: "100Mi" },
+                                },
+                                securityContext: {
+                                    allowPrivilegeEscalation: false,
+                                    capabilities: { drop: [ "all" ], add: [ "net_raw" ] },
+                                    readOnlyRootFilesystem: true,
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+
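+        # MetalLB configuration, rendered to YAML: a single layer2 address pool.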
+        configMap: kube.ConfigMap("config") {
+            local cm = self,
+            metadata+: {
+                namespace: cfg.namespace,
+            },
+            data: {
+                config: std.manifestYamlDoc({
+                    "address-pools": [
+                        {
+                            name: "public-v4-1",
+                            protocol: "layer2",
+                            addresses: ["185.236.240.50-185.236.240.63",],
+                        }
+                    ],
+                }),
+            },
+        },
     },
 }
diff --git a/cluster/kube/lib/nginx.libsonnet b/cluster/kube/lib/nginx.libsonnet
new file mode 100644
index 0000000..a6d10f1
--- /dev/null
+++ b/cluster/kube/lib/nginx.libsonnet
@@ -0,0 +1,214 @@
+# Deploy a per-cluster Nginx Ingress Controller
+
+local kube = import "../../../kube/kube.libsonnet";
+
+{
+    Environment: {
+        local env = self,
+        local cfg = env.cfg,
+        cfg:: {
+            image: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0",
+            namespace: "nginx-system",
+        },
+
+        metadata:: {
+            namespace: cfg.namespace,
+            labels: {
+                "app.kubernetes.io/name": "ingress-nginx",
+                "app.kubernetes.io/part-of": "ingress-nginx",
+            },
+        },
+
+        namespace: kube.Namespace(cfg.namespace),
+
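+        # ConfigMaps consumed by the ingress controller via the flags passed in the Deployment below; created empty.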
+        maps: {
+            make(name):: kube.ConfigMap(name) {
+                metadata+: env.metadata,
+            },
+            configuration: env.maps.make("nginx-configuration"),
+            tcp: env.maps.make("tcp-services"),
+            udp: env.maps.make("udp-services"),
+        },
+
+        sa: kube.ServiceAccount("nginx-ingress-serviceaccount") {
+            metadata+: env.metadata,
+        },
+
+        cr: kube.ClusterRole("nginx-ingress-clusterrole") {
+            metadata+: env.metadata {
+                namespace:: null,
+            },
+            rules: [
+                {
+                    apiGroups: [""],
+                    resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"],
+                    verbs: ["list", "watch"],
+                },
+                {
+                    apiGroups: [""],
+                    resources: ["nodes"],
+                    verbs: ["get"],
+                },
+                {
+                    apiGroups: [""],
+                    resources: ["services"],
+                    verbs: ["get", "list", "watch"],
+                },
+                {
+                    apiGroups: ["extensions"],
+                    resources: ["ingresses"],
+                    verbs: ["get", "list", "watch"],
+                },
+                {
+                    apiGroups: [""],
+                    resources: ["events"],
+                    verbs: ["create", "patch"],
+                },
+                {
+                    apiGroups: ["extensions"],
+                    resources: ["ingresses/status"],
+                    verbs: ["update"],
+                },
+            ],
+        },
+
+        crb: kube.ClusterRoleBinding("nginx-ingress-clusterrole-nisa-binding") {
+            metadata+: env.metadata {
+                namespace:: null,
+            },
+            roleRef: {
+                apiGroup: "rbac.authorization.k8s.io",
+                kind: "ClusterRole",
+                name: env.cr.metadata.name,
+            },
+            subjects: [
+                {
+                    kind: "ServiceAccount",
+                    name: env.sa.metadata.name,
+                    namespace: env.sa.metadata.namespace,
+                },
+            ],
+        },
+
+        role: kube.Role("nginx-ingress-role") {
+            metadata+: env.metadata,
+            rules: [
+                {
+                    apiGroups: [""],
+                    resources: ["configmaps", "pods", "secrets", "namespaces"],
+                    verbs: ["get"],
+                },
+                {
+                    apiGroups: [""],
+                    resources: ["configmaps"],
+                    resourceNames: ["ingress-controller-leader-nginx"],
+                    verbs: ["get", "update"],
+                },
+                {
+                    apiGroups: [""],
+                    resources: ["configmaps"],
+                    verbs: ["create"],
+                },
+                {
+                    apiGroups: [""],
+                    resources: ["endpoints"],
+                    verbs: ["get"],
+                },
+            ],
+        },
+
+        roleb: kube.RoleBinding("nginx-ingress-role-nisa-binding") {
+            metadata+: env.metadata,
+            roleRef: {
+                apiGroup: "rbac.authorization.k8s.io",
+                kind: "Role",
+                name: env.role.metadata.name,
+            },
+            subjects: [
+                {
+                    kind: "ServiceAccount",
+                    name: env.sa.metadata.name,
+                    namespace: env.sa.metadata.namespace,
+                },
+            ],
+        },
+
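+        # LoadBalancer Service in front of the controller; MetalLB assigns its external address, which --publish-service reports on Ingress objects.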
+        service: kube.Service("ingress-nginx") {
+            metadata+: env.metadata,
+            target_pod:: env.deployment.spec.template,
+            spec+: {
+                type: "LoadBalancer",
+                ports: [
+                    { name: "http", port: 80, targetPort: 80, protocol: "TCP" },
+                    { name: "https", port: 443, targetPort: 443, protocol: "TCP" },
+                ],
+            },
+        },
+
+        deployment: kube.Deployment("nginx-ingress-controller") {
+            metadata+: env.metadata,
+            spec+: {
+                replicas: 1,
+                template+: {
+                    spec+: {
+                        serviceAccountName: env.sa.metadata.name,
+                        containers_: {
+                            controller: kube.Container("nginx-ingress-controller") {
+                                image: cfg.image,
+                                args: [
+                                    "/nginx-ingress-controller",
+                                    "--configmap=%s/%s" % [cfg.namespace, env.maps.configuration.metadata.name],
+                                    "--tcp-services-configmap=%s/%s" % [cfg.namespace, env.maps.tcp.metadata.name],
+                                    "--udp-services-configmap=%s/%s" % [cfg.namespace, env.maps.udp.metadata.name],
+                                    "--publish-service=%s/%s" % [cfg.namespace, env.service.metadata.name],
+                                    "--annotations-prefix=nginx.ingress.kubernetes.io",
+                                ],
+                                env_: {
+                                    POD_NAME: kube.FieldRef("metadata.name"),
+                                    POD_NAMESPACE: kube.FieldRef("metadata.namespace"),
+                                },
+                                ports_: {
+                                    http: { containerPort: 80 },
+                                    https: { containerPort: 443 },
+                                },
+                                livenessProbe: {
+                                    failureThreshold: 3,
+                                    httpGet: {
+                                        path: "/healthz",
+                                        port: 10254,
+                                        scheme: "HTTP",
+                                    },
+                                    initialDelaySeconds: 10,
+                                    periodSeconds: 10,
+                                    successThreshold: 1,
+                                    timeoutSeconds: 10,
+                                },
+                                readinessProbe: {
+                                    failureThreshold: 3,
+                                    httpGet: {
+                                        path: "/healthz",
+                                        port: 10254,
+                                        scheme: "HTTP",
+                                    },
+                                    periodSeconds: 10,
+                                    successThreshold: 1,
+                                    timeoutSeconds: 10,
+                                },
+                                securityContext: {
+                                    allowPrivilegeEscalation: true,
+                                    capabilities: {
+                                        drop: ["ALL"],
+                                        add: ["NET_BIND_SERVICE"],
+                                    },
+                                    runAsUser: 33, # www-data
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+}