Merge "app/matrix: matrix-ng - synapse deployment cleanup"
diff --git a/.bazelrc b/.bazelrc
index 419641e..dc92a13 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -1,7 +1,7 @@
 # https://github.com/bazelbuild/rules_docker/issues/842
-build --host_force_python=PY2
-test --host_force_python=PY2
-run --host_force_python=PY2
+build --host_force_python=PY3
+test --host_force_python=PY3
+run --host_force_python=PY3
 build --stamp
 build --workspace_status_command=./bzl/workspace-status.sh
 test --build_tests_only
diff --git a/app/matrix/matrix.hackerspace.pl.jsonnet b/app/matrix/matrix.hackerspace.pl.jsonnet
index d71dd84..6488fbb 100644
--- a/app/matrix/matrix.hackerspace.pl.jsonnet
+++ b/app/matrix/matrix.hackerspace.pl.jsonnet
@@ -26,8 +26,7 @@
         "irc-freenode": irc.AppServiceIrc("freenode") {
             cfg+: {
                 image: cfg.images.appserviceIRC,
-                // TODO(q3k): move this appservice to waw-hdd-redundant-3
-                storageClassName: "waw-hdd-paranoid-2",
+                storageClassName: "waw-hdd-redundant-3",
                 metadata: app.metadata("appservice-irc-freenode"),
                 // TODO(q3k): add labels to blessed nodes
                 nodeSelector: {
diff --git a/app/matrix/wellknown/BUILD b/app/matrix/wellknown/BUILD
index 1cf4138..b44ac52 100644
--- a/app/matrix/wellknown/BUILD
+++ b/app/matrix/wellknown/BUILD
@@ -1,3 +1,4 @@
+load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push")
 load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
 
 go_library(
@@ -23,3 +24,28 @@
     embed = [":go_default_library"],
     deps = ["@com_github_go_test_deep//:go_default_library"],
 )
+
+container_layer(
+    name = "layer_bin",
+    files = [
+        ":wellknown",
+    ],
+    directory = "/app/matrix/",
+)
+
+container_image(
+    name = "runtime",
+    base = "@prodimage-bionic//image",
+    layers = [
+        ":layer_bin",
+    ],
+)
+
+container_push(
+    name = "push",
+    image = ":runtime",
+    format = "Docker",
+    registry = "registry.k0.hswaw.net",
+    repository = "q3k/wellknown",
+    tag = "{BUILD_TIMESTAMP}-{STABLE_GIT_COMMIT}",
+)
diff --git a/app/matrix/wellknown/README.me b/app/matrix/wellknown/README.md
similarity index 100%
rename from app/matrix/wellknown/README.me
rename to app/matrix/wellknown/README.md
diff --git a/bgpwtf/invoice/BUILD.bazel b/bgpwtf/invoice/BUILD.bazel
index 900f0b3..950474e 100644
--- a/bgpwtf/invoice/BUILD.bazel
+++ b/bgpwtf/invoice/BUILD.bazel
@@ -1,4 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
 
 go_library(
     name = "go_default_library",
@@ -31,3 +31,9 @@
     embed = [":go_default_library"],
     visibility = ["//visibility:public"],
 )
+
+go_test(
+    name = "go_default_test",
+    srcs = ["calc_test.go"],
+    embed = [":go_default_library"],
+)
diff --git a/bgpwtf/invoice/calc.go b/bgpwtf/invoice/calc.go
index 9c411da..72933d3 100644
--- a/bgpwtf/invoice/calc.go
+++ b/bgpwtf/invoice/calc.go
@@ -1,17 +1,23 @@
 package main
 
 import (
+	"sort"
 	"time"
 
 	pb "code.hackerspace.pl/hscloud/bgpwtf/invoice/proto"
 )
 
+// calculateInvoiceData applies all business logic to populate an Invoice's
+// denormalized fields from its InvoiceData.
 func calculateInvoiceData(p *pb.Invoice) {
+	// Populate default unit.
+	// TODO(q3k): this really should be done on invoice submit instead.
 	p.Unit = p.Data.Unit
 	if p.Unit == "" {
 		p.Unit = "€"
 	}
 
+	// Calculate totals.
 	p.TotalNet = 0
 	p.Total = 0
 	for _, i := range p.Data.Item {
@@ -24,6 +30,21 @@
 		i.Total = rowTotal
 	}
 
+	// Calculate due date.
 	due := int64(time.Hour*24) * p.Data.DaysDue
 	p.DueDate = time.Unix(0, p.Date).Add(time.Duration(due)).UnixNano()
+
+	// Denormalize Items' GTUCodes into the Invoice's summary GTU codes.
+	codeSet := make(map[pb.GTUCode]bool)
+	for _, item := range p.Data.Item {
+		for _, code := range item.GtuCode {
+			codeSet[code] = true
+		}
+	}
+	var codes []pb.GTUCode
+	for c := range codeSet {
+		codes = append(codes, c)
+	}
+	sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] })
+	p.GtuCode = codes
 }
diff --git a/bgpwtf/invoice/calc_test.go b/bgpwtf/invoice/calc_test.go
new file mode 100644
index 0000000..e8607c9
--- /dev/null
+++ b/bgpwtf/invoice/calc_test.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+	"testing"
+	"time"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/invoice/proto"
+)
+
+// Fake test data for tests in this file.
+var (
+	itemInternet1 = &pb.Item{
+		Title:     "Dostęp do Internetu - Umowa FOOBAR/10 - Opłata Abonentowa 2020/08",
+		Count:     1,
+		UnitPrice: 4200,
+		Vat:       23000,
+	}
+	itemInternet2 = &pb.Item{
+		Title:     "Dostęp do Internetu - Umowa FOOBAR/10 - Opłata Abonentowa 2020/09",
+		Count:     1,
+		UnitPrice: 4200,
+		Vat:       23000,
+	}
+	itemHardware = &pb.Item{
+		Title:     "Thinkpad x230, i7, 16GB RAM, Refurbished",
+		Count:     1,
+		UnitPrice: 10000,
+		Vat:       23000,
+		GtuCode:   []pb.GTUCode{pb.GTUCode_GTU_05},
+	}
+	billing1 = []string{
+		"Wykop Sp. z o. o.",
+		"Zakręt 8",
+		"60-351 Poznań",
+	}
+	billing2 = []string{
+		"TEH Adam Karolczak",
+		"Zgoda 18/2",
+		"95-200 Pabianice",
+	}
+	vatID1 = "PL8086133742"
+	vatID2 = "DE133742429"
+	iban   = "PL 59 1090 2402 9746 7956 2256 2375"
+	swift  = "WLPPZLPAXXX"
+)
+
+func TestCalculate(t *testing.T) {
+	now := time.Now()
+	for _, te := range []struct {
+		description string
+		data        *pb.InvoiceData
+		want        *pb.Invoice
+	}{
+		{
+			description: "Invoice without JPK_V7 codes",
+			data: &pb.InvoiceData{
+				Item:            []*pb.Item{itemInternet1, itemInternet2},
+				InvoicerBilling: billing1,
+				CustomerBilling: billing2,
+				InvoicerVatId:   vatID1,
+				CustomerVatId:   vatID2,
+				Date:            now.UnixNano(),
+				DaysDue:         21,
+				Iban:            iban,
+				Swift:           swift,
+				Unit:            "PLN",
+			},
+			want: &pb.Invoice{
+				TotalNet: 8400,
+				Total:    10332,
+				Unit:     "PLN",
+			},
+		},
+		{
+			description: "Invoice with JPK_V7 codes",
+			data: &pb.InvoiceData{
+				// Repeated item with GTU code GTU_05, to ensure result doesn't
+				// have repeated codes.
+				Item:            []*pb.Item{itemInternet1, itemHardware, itemHardware},
+				InvoicerBilling: billing1,
+				CustomerBilling: billing2,
+				InvoicerVatId:   vatID1,
+				CustomerVatId:   vatID2,
+				Date:            now.UnixNano(),
+				DaysDue:         21,
+				Iban:            iban,
+				Swift:           swift,
+				Unit:            "PLN",
+			},
+			want: &pb.Invoice{
+				TotalNet: 24200,
+				Total:    29766,
+				Unit:     "PLN",
+				GtuCode:  []pb.GTUCode{pb.GTUCode_GTU_05},
+			},
+		},
+	} {
+		t.Run(te.description, func(t *testing.T) {
+			invoice := &pb.Invoice{
+				Data: te.data,
+				Date: te.data.Date,
+			}
+			calculateInvoiceData(invoice)
+			if want, got := te.want.TotalNet, invoice.TotalNet; want != got {
+				t.Errorf("got TotalNet %d, wanted %d", got, want)
+			}
+			if want, got := te.want.Total, invoice.Total; want != got {
+				t.Errorf("got Total %d, wanted %d", got, want)
+			}
+			if want, got := te.want.Unit, invoice.Unit; want != got {
+				t.Errorf("got Unit %q, wanted %q", got, want)
+			}
+			due := time.Duration(int64(time.Hour*24) * te.data.DaysDue)
+			if want, got := now.Add(due).UnixNano(), invoice.DueDate; want != got {
+				t.Errorf("got DueDate %d, wanted %d", got, want)
+			}
+			if want, got := len(te.want.GtuCode), len(invoice.GtuCode); want != got {
+				t.Errorf("got %d GTU codes, wanted %d", got, want)
+			} else {
+				for i, want := range te.want.GtuCode {
+					got := invoice.GtuCode[i]
+					if want != got {
+						t.Errorf("GTU code %d: wanted %s, got %s", i, want.String(), got.String())
+					}
+				}
+			}
+		})
+	}
+}
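
The expected totals above follow from the fixed-point convention the test data
implies: money amounts are in the smallest currency unit, and Vat is expressed
in thousandths of a percent (23000 == 23.000%). A minimal arithmetic check of
the first case, as a sketch under that assumption:

    package main

    import "fmt"

    // Reproduces the expected values of the "Invoice without JPK_V7 codes"
    // case, assuming Vat == 23000 means 23.000%.
    func main() {
        unitPrice := uint64(4200) // per item, net
        count := uint64(2)        // itemInternet1 + itemInternet2
        vat := uint64(23000)      // 23.000%, in 1/1000 of a percent

        totalNet := unitPrice * count            // 8400
        totalVat := totalNet * vat / 100000      // 1932
        fmt.Println(totalNet, totalNet+totalVat) // prints: 8400 10332
    }

The second case works out the same way: 4200 + 10000 + 10000 = 24200 net, plus
23% VAT, gives the expected total of 29766.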
diff --git a/bgpwtf/invoice/main.go b/bgpwtf/invoice/main.go
index 5133010..ae17dbb 100644
--- a/bgpwtf/invoice/main.go
+++ b/bgpwtf/invoice/main.go
@@ -130,6 +130,10 @@
 	return &pb.SealInvoiceResponse{}, nil
 }
 
+func (s *service) GetInvoices(req *pb.GetInvoicesRequest, srv pb.Invoicer_GetInvoicesServer) error {
+	return status.Error(codes.Unimplemented, "unimplemented")
+}
+
 func init() {
 	flag.Set("logtostderr", "true")
 }
diff --git a/bgpwtf/invoice/model.go b/bgpwtf/invoice/model.go
index fcf4aba..548d8ff 100644
--- a/bgpwtf/invoice/model.go
+++ b/bgpwtf/invoice/model.go
@@ -75,7 +75,7 @@
 			invoice_id, final_uid, sealed_time
 		) values (
 			?,
-			( select printf("%04d", ifnull( (select final_uid as v from invoice_seal order by final_uid desc limit 1), 20000) + 1 )),
+			( select printf("%04d", ifnull( (select final_uid as v from invoice_seal order by final_uid desc limit 1), 21000) + 1 )),
 			?
 		)
 	`
diff --git a/bgpwtf/invoice/proto/BUILD.bazel b/bgpwtf/invoice/proto/BUILD.bazel
index 51f85fe..2eeae64 100644
--- a/bgpwtf/invoice/proto/BUILD.bazel
+++ b/bgpwtf/invoice/proto/BUILD.bazel
@@ -1,3 +1,4 @@
+load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
 
diff --git a/bgpwtf/invoice/proto/invoice.proto b/bgpwtf/invoice/proto/invoice.proto
index ee2b9d8..70e6923 100644
--- a/bgpwtf/invoice/proto/invoice.proto
+++ b/bgpwtf/invoice/proto/invoice.proto
@@ -264,12 +264,12 @@
     // If sealed, otherwise 'proforma'.
     string final_uid = 3;
     int64 date = 4;
-    int64 due_date = 5;
     // Denormalized fields follow.
+    int64 due_date = 5;
     uint64 total_net = 6;
     uint64 total = 7;
     string unit = 8;
-    repeated GTUCode gtu_codes = 10;
+    repeated GTUCode gtu_code = 10;
     // Next tag: 11;
 }
 
@@ -312,9 +312,48 @@
 message SealInvoiceResponse {
 }
 
+message GetInvoicesRequest {
+    // Return all invoices issued in a given year.
+    message ForYear {
+        int32 year = 1;
+    }
+    // Return all invoices issued in a given month of a year.
+    message ForMonth {
+        int32 year = 1;
+        int32 month = 2;
+    }
+
+    oneof range {
+        ForYear for_year = 1;
+        ForMonth for_month = 2;
+    }
+}
+
+message GetInvoicesResponse {
+    // Each chunk may contain an arbitrary number of invoices, and each
+    // GetInvoices request may return an arbitrary number of
+    // GetInvoicesResponses in a stream.
+    repeated Invoice invoice = 1;
+}
+
 service Invoicer {
+    // Create an invoice with the given data, returning its UID. The newly
+    // created invoice starts out as a proforma invoice and is not yet sealed,
+    // ie. not given a unique, sequential ID.
     rpc CreateInvoice(CreateInvoiceRequest) returns (CreateInvoiceResponse);
+
+    // Get invoice details for a given UID.
     rpc GetInvoice(GetInvoiceRequest) returns (GetInvoiceResponse);
+
+    // Return chunks of a rendered PDF for a given UID. If the invoice is
+    // sealed, the stored PDF will be returned, otherwise a PDF will be
+    // rendered on the fly.
     rpc RenderInvoice(RenderInvoiceRequest) returns (stream RenderInvoiceResponse);
+
+    // Seal invoice, ie. assign it a sequential ID and render it to an
+    // immutable PDF for audit purposes.
     rpc SealInvoice(SealInvoiceRequest) returns (SealInvoiceResponse);
+
+    // Return invoices matching a given filter, streamed in chunks.
+    rpc GetInvoices(GetInvoicesRequest) returns (stream GetInvoicesResponse);
 }
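
The GetInvoices handler added in main.go above is stubbed out as Unimplemented;
the chunking contract described on GetInvoicesResponse could be satisfied along
these lines. This is a sketch only: the chunk size is arbitrary, and it assumes
the matching invoices have already been looked up elsewhere.

    package main

    import (
        pb "code.hackerspace.pl/hscloud/bgpwtf/invoice/proto"
    )

    // sendInvoiceChunks streams an already-retrieved list of invoices back to
    // a GetInvoices client, an arbitrary number of invoices per response.
    func sendInvoiceChunks(invoices []*pb.Invoice, srv pb.Invoicer_GetInvoicesServer) error {
        const chunkSize = 100 // arbitrary
        for i := 0; i < len(invoices); i += chunkSize {
            end := i + chunkSize
            if end > len(invoices) {
                end = len(invoices)
            }
            if err := srv.Send(&pb.GetInvoicesResponse{Invoice: invoices[i:end]}); err != nil {
                return err
            }
        }
        return nil
    }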
diff --git a/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix b/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix
index f97f1eb..aea2b80 100644
--- a/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix
+++ b/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix
@@ -1,50 +1,51 @@
 -----BEGIN PGP MESSAGE-----
 
-hQEMAzhuiT4RC8VbAQf+La/HpIwKtvmb8ZNhG9lDxmAxibz3G1WgDS5SMMuP3BnO
-IK+sOR5MOfae+Cry82f4Nlg24cJDe4963vNq0g+MiLnGpxmLHb+plQxkf5S2yU55
-+dKwa/usTJZLa97yqsh8+vdfAnA+C3BaLhhDOO5sgZkArpgMGQ6trUsn+6vmhAmZ
-LLw0rlQjz/IvLu8WW8RqZL17ruXOov4qF0NB6yroZAN4j5spGpq65J2puAZb+rI3
-ZGTm8ie6zb7IlW8RWG96TWk48KcYgLnivxVJwll+zn9Mb0g2AzJWyN7bOy1t1wHs
-yYz5xyB8H/Nqhv2dLfHUtXjQF9uh0ivWewWgzzggjIUBDANcG2tp6fXqvgEH/iz7
-OYsZ+Mj42CYjNntwauHCfmGrvSJwVgco+hzfNmAT2wCHwoZKdktLq1oUuyRrPPwK
-nSe/L249Q7Ick47Lk+NrCIjCFsz03ZzTrEeuY3Yav5m9hzifwLJaeTDy+A+jde2D
-zg3gxuAeaaXfrFmKXwfhpf5/t2LCVFtmdmMg+1ButhgWKMDJL6wQVX5EZjHF6PU0
-n2X+XkXFa807FrZ4sI29pxEmakKtFQHsPlP9G2ydBrz2EmcrSgN+uKorAqEtwUBJ
-zuT7Qg4Ewb2hcPTOR4xLV/DCY80IQyWKEcSj1Hjtes3OA5osDlJWeZvoNUFlXSli
-Wc8S087+7fqmRIohU7+FAgwDodoT8VqRl4UBD/9OivAOdH/LxU0WpT7Bofcyw6iA
-QQZ9kN5SLAY6S9pT274QwHkF/sQGt90Gjq0kIMjm4gc5Cx4kAYoT59Ep/R6HmHC7
-u7D+GuByyiSgAa/YEk3tIAI6/gOeVx5BbG6QNL+ysI8XgRrpRWaEfz8OOpBIALgC
-oBSKBaqxApCejkdO4K3P+lT3jJQ2uOT1i72eBhKOpBA1Vz4BWM/w9HFkhw37sWXc
-i34yrlXdldVpJxbKXMyg4f5hz9OrDCTkLce/dwlZpFGFF+JArFLJ2iLytlFOlTDX
-g5yZLSfziw/B5q1mBevJoSFRgZys3z8xmVD/ETS+1DehJjQsLKo/QlpLqx2dj2qd
-9R5OVGMc2s9YmEAee5aoy8z+egZwo0n48Y10LKwro4XCev2ozy73m3nx7uydIwI6
-jl4qYk6qYASJVK9qrBeB//chxrATLS2PgJqO4oKl8EBvmWeUYR4qaOE/p8QlcLLQ
-Vm5Fm+iITPsSNZgK3AGI0S8CoRLPi/o3C5VvAlEt+bfEuTpR8zzeZLfDqOJ94kOp
-4vKaoQTRzHk8itMkdMeBNBAIGh7fEMqiu3kqtlMunp74uKoY/uzh2rhjBzE7quKB
-x3gTgAPnGTiIFvP1ZaNMjsW9OCsOU2Fkv9syLrpkMuGOfCOLSULQG3GYxG4kybzo
-8w6SQ/y/fDermA+6+YUCDAPiA8lOXOuz7wEP/AmhfKpJoizMTULib/Xf7+A9x34K
-eeLsLPnoRZFdO2pTv7ZtNryOmzVPsXjiuwgO9ULrW31WDeb9KNmLk9JyWji6iED1
-wbbSkFT0yzsMD1BjmxNdd9WHK55sxvtX+yH2MHLIepLXAU0emfIBsW9kMuJ01965
-JlCV+TtYJnUGARoLCBnUzvk1VpjnwD3ed0E5atrwk0kRNIPoM1N2DAIxaIua4CX1
-XCylBgMuNW/D86lLpCMHE3LA1FPqNyFe7eyspWGwkDTRK/YZbq6zUGXrNhftK2vV
-8CA0E2dKIbi+TsrvJVb2X5T3Mf4EsTd2oqclVGWttfbXYs5Rv0vf63Z01VsiWL7A
-sSZJlfVnCuKYvZnB3Am1vZtlk6mHqQRJ3DZM0vp/AkjZ5x6rPwy8wRaKpVD06yZO
-N3h4XLsidnteUZBbamQEKjfZoo0zw27TG+gqT+MGIaQWieuCMBm5amiwUJvRsfH0
-S/M50nuikiXl67xEd+vNYGiacH2tfOorUKzoAebEM2EkB3xb97kU3X+OyImre1S6
-Gs5TC1QH6dtd6yKQAPoQZtK0EQI6B+Rw+zH9IAQgPz5C9oUgRd62GJzgonYOJzFH
-jzOARKKSRen+bsrm3jldfJ4sPl6sRWRHWVLsWh4AJYK8iAqhu88dYOc1nksslmGn
-fOV80T8Acty1wshO0ukB34maKPBSF+rs1oJ/IHmqXHfx5mfjsdof56tgAyQ/rIUE
-ulpDR8ytvoviXzH550Ydf3w+bFrEPHkEq0IBXi31BpM7PUiN1xryibdY81ukAjSS
-KRQV5RFS6lwdJOGAFkE0gJ7xES8P7FiC+wKHE78WdvNm0trZiiaJNrMebkitw6K4
-rJMpZj0mhCtKryYmz3L857QFYeUJ9M95Y4/Ib6aM7dn/fJMOeUOVtv0wESQ44dwQ
-CPDjfQdBXrshRWOYMZBsJjI0J0oBc0BRrbHo60LNixqL36CKWyCRNKNVKyZfv23G
-sBeoSpFOHxiiATzlgobufJyhwgU4k8idMt8+lMl783FN/0gRYs14ziYdKNYP2ctG
-Bci38D19tWoLidIuyR9l96IGWmzxey/wQI6UaRFFQR5Y9Po40WCq6XZIbeT3wDRF
-0ynTxlQ0TP6802iglC93S26tLL3QN/nF22gOxKIEC/tGMtv1vJRWfC1svPitDMD+
-Y91xJS+rnzzcEMeDvZmgA5JRGn4YCL0RK1AWgMmQB1zBmPEXpIpFGjQiWF/s2Cg4
-l21wEMmVO8mTVk2Rygaf6wvyf2TmblGPTbb2zfrq7lKtrYFR34oIB9FY9nqJoqDz
-g2OZhq59qIjTgbvPjkPdvqqi0Vg7CQ4zBz+zhOLrb5U0NPHvMtvwZn1bW4dzSFQr
-IBIk63tVMLrfMvNScOAviyK7QoHOhHXElSofTBSUtlMBAE8vusHE/mYic92BVlsu
-TofitNVONyJHKst3jKVU68O3pWHp2+7CyA8gzf6ieH33UQU=
-=1q6K
+hQEMAzhuiT4RC8VbAQgAtWAfrUok8EKsWRY2FEZbNeawMXXpuBrMDARxNY1xhV6b
+3Pxz+148na8+KQR0asleOO+9qPKQP0N+HA6W6SJEgrfQ7q3XKdsaZVMJjNaRhb9j
+eOAe1MLr9Ps0Lx93nknI6bPsX8odpa7oNQYqI7QWBphQLVtdBKaYVkoGN7P+xHlu
+j2HDyD3TOfNH2UxywWOMAJixYkcZ6/v1KNS4JsDUe4b5Tf/IegX2LSoY9qH60Psm
+BFMCmYmGg/MlFyQpyo/CYebJu9BWMHHcj2o29W+OaJqCCYVC+XR+h5EtssnPwedK
+D7A5jLu83pzonZQxiheP0JWSrfMlo8HZNcbhZw0IIoUBDANcG2tp6fXqvgEH/1Gp
+3qsm/MfFtRoHzbRaOEIofaKjv79PhdN8p+9tr4J31oMnuJNIVWozW8R1YBzyL6Pg
+UeKZaAsW9zP9+HhQw8ZahX1A3Paz3LhO9By4wkgOt5up7s5QS2klWWUBaF8AIxKF
+FBoNJcc52VH5yBXyiGd5UAHigKRldwE9yIzWKzt4/60/NtVzkfK6j8KFdRyLJZFE
+0IqRjbFxdvMr4hyc1h5wibBonWKRIDEvXqIeOWdUbDHqTekJcXVUrtxw5u8rry2F
+XaqN9FM+++QPFX4hrbIJe1w7/gINH72PnAPApN/MfUaQsGE/noX3CjaINspB+Nhq
+AwUITmBSjdZ0vuEGoWaFAgwDodoT8VqRl4UBD/9d9rQRlpfKi6K3WlLJra4OEtQm
++RTE8I7OQYCQ/C9QFPw1ux0RtCTQF/uL8nKzoWG+LbUgKVRoZQSV1k4L9QZ/YKhd
+5AC4YT76lPCxCemuSfCQ3sb0I4uEa3JiXeBSVPAfwcS3nXhnyhdFLGhdmAs1hnzo
+E9wD4oxk6yHhozWH1QwJ4Syioywmoy5kKJ4tQREpceCvyoZD2b2h4kLUiZKkBZ+a
+Nzo3AQdQJF/Yr4BL2afMwAwd9x9zpdu/LL7k6INsXuM0S1I8ipdBpiKpZTQcAwK9
+P6RF8p8lKpMX5u1PWq7CMJMJGRQnKAXCJoKYathUygePewq3muOx2Cn4fjNGLT0/
+O795/QtCDI9Q4xsY7/uMLvqpr6skFXecH304Mp23unTBK+knBkWRYpaoA9Gfri8j
+ELMkPvsLCjE9gg3vUdgyPDq3Ov2XDXvgsNW36ghiUYmBRhfoXHG/TuG2iEGP32mM
+coW+4q7DpcyjYUXeh80PLYWe68gCmQ4XN8oh2+xPhFwvjKOidW8o1TbRAdf+A+yu
+d6EvKSzSpG8SABrlCofD7HPyrAbwmTP2SdmoamCT8NSiLoXQZk2ZO3xSYp8gR4aP
+fDENcYtqipDyMTPRfPEPMjYnn+AR4d4UD0jowcaeKThg/fKmzwP0N4pm6uaR9pJ6
++MwKToisQk+tLIv44YUCDAPiA8lOXOuz7wEP/0xTmMro6Jb7LG8lOykEomYBrtT+
+meWftSGjQgSQITkw/cVqIzpYTy7HRtP8luiJyvh1Mt8Fm3MlZLgorp0TtxaUPq0y
+yNUWkDXOIu0pcmX1c38rmEIMvnUcREyJlcFv7Y8XAYf8ZP4TUfA3t+wZaFYoiV0M
+Ai4tHGCIDdCf4Q9fSEFIU+UwJ9/zuBglMPJ74x3IPpEnlKtqkgzpT6kUGypIFKA9
+n83ycOiu78FTHQF3ULY05Of2cBKTtNYc0R71QIyHovh1bT+o3EgMwxcKfzZjBAgI
+lGtJG1+mi44FS90zUPWVtRRf0TwCFXw1HSCS7nbL+5831WdcdhWVgWp2RCczVORv
+G4q5SP1yunBOi7HKVRHGW16bAhM/OOgyJ2lLqRSglvrYC6ympXvw85Y6xXKiG9tC
+wAKSJOvkSZ8xoahoVVQwbbD6uiQqpTzl3befxkLd0uz9U43pfW0z9x+r38ussUbM
+bnLMCwGJ9N7dZevN6GkX886S1Tk0CwM8H39tRyb9xowcc/D9Hoy+/jOq1YOtpHXA
+NXc7gbDppTlRJqbrniH9YxIG00+x1K1RKMBjLgBVOhT64n3U0DIBxvVQLmg9kQUN
+6tiDe0toQUhXVV5eu5PivoPuDcHZ7+4FdoAtIeQ7Y2A34HZ4KXSvF3qkf3RPHWvo
+l77A/4GZzFCQW38E0ukBD1W1hTHZcwNaPwRGtD1rvM1f6o7c60sDAAWkMeKzsFRd
+0ADg08xVj447WnZtgy7j2LPd6JeaodHumw95Cwkdd06lupYwG/CV/ZmvB5Ae+llu
+3mPewaSfpmcWvg0QtfDnv8NWW/BQSnPCjjPlVAdVZxm/uRHNKWSApK1UrfDK463e
+Dg60CXE64j/7bX9TCSf7KVHqan08IMvS8i3gqKYNv/9yGR9EnpNvVWv59zHZao64
+yHO/NHPBXgQk+Vn+P2iER6/bZaMkq7HdjkM9KeTBZUmvzfQ69wYhlrTI5HvkLHNP
+FV+oqwgG9KwPtr0zzBp9fyjfMw6081NedsMH+GwiNvibM5ryBULTnziJ4Hm0MbXH
+Yg/XFi/mMwq3rGq+ZauyjSjIuQXxZMQLpuzSWRQx5thwDePGJ7Dx5nGEJQC5NS/x
+HsPsvKEtl/n/CY5x3qV1NScCTKlXiD7mGE/whuO/Aoun+tHXNah/kwXFWyeQkHkS
+xpjm10vq4s6CvieCDKc+QenxpLt1PGBL7yvGVBXBTQbp1N4laYATXzTFr9b/RG5s
+C0aWWLuraflILCD0wxDuZnFVrPVmsfMp86+donIaNvBFwrYyMw9cnVWGoVAIG4LV
+B9vfZaVzhbNgynnwu1JifZzwIytLBHsemRMq5vRUE8ju0z9FP9hhqHLu1pF1dJH2
+fyqFYL44br5M0c2f2xnzGpsca9C7mDBXN5ktR1ts+fHdLELsqg9SwAtqqWCEB/jQ
+T96vkVEydwQ/mVqCtPLGk3NVJ7NjUVISFvQAj4w9vG9fCgD1NIHJco0VgIkvn9Yu
+2pPCrY+NVeibz9vaaUIuOf6lXZCZFOyVTZIPXQyhumel/f0MDs3Lx5ZA3rUL6jS+
+=nGiW
 -----END PGP MESSAGE-----
diff --git a/cluster/admitomatic/BUILD.bazel b/cluster/admitomatic/BUILD.bazel
new file mode 100644
index 0000000..5cb23ab
--- /dev/null
+++ b/cluster/admitomatic/BUILD.bazel
@@ -0,0 +1,28 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "ingress.go",
+        "main.go",
+    ],
+    importpath = "code.hackerspace.pl/hscloud/cluster/admitomatic",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//go/mirko:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@io_k8s_api//admission/v1beta1:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "admitomatic",
+    embed = [":go_default_library"],
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["ingress_test.go"],
+    embed = [":go_default_library"],
+)
diff --git a/cluster/admitomatic/ingress.go b/cluster/admitomatic/ingress.go
new file mode 100644
index 0000000..42cab98
--- /dev/null
+++ b/cluster/admitomatic/ingress.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	admission "k8s.io/api/admission/v1beta1"
+)
+
+// ingressFilter is a filter which allows or denies the creation of an ingress
+// backing a given domain within a namespace. It does so by operating on an
+// explicit list of allowed namespace/domain pairs, where each domain is either
+// a single domain or a DNS wildcard at a given root.
+// By default every domain is allowed in every namespace. However, the moment
+// an entry is added for a given domain (or wildcard that matches some
+// domains), this domain will only be allowed in that namespace.
+//
+// For example, with the given allowed domains:
+// -  ns: example, domain: one.example.com
+// -  ns: example, domain: *.google.com
+// The logic will be as follows:
+// -  one.example.com will be only allowed in the example namespace
+// -  any .google.com domain will be only allowed in the example namespace
+// -  all other domains will be allowed everywhere.
+//
+// This logic allows for the easy use of arbitrary domains by k8s users within
+// their personal namespaces, while restricting critical domains to
+// trusted namespaces.
+//
+// An ingressFilter can be used straight away after constructing it as an
+// empty (zero) value.
+type ingressFilter struct {
+	// allowed is a map from namespace to list of domain matchers.
+	allowed map[string][]*domain
+}
+
+// domain is a matcher for either a single given domain, or a domain wildcard.
+// If this is a wildcard matcher, any amount of dot-delimited levels under the
+// domain will be permitted.
+type domain struct {
+	// dns is either the domain name matched by this matcher (if wildcard ==
+	// false), or the root of a wildcard represented by this matcher (if
+	// wildcard == true).
+	dns      string
+	wildcard bool
+}
+
+// match returns whether this matcher matches a given domain.
+func (d *domain) match(dns string) bool {
+	if !d.wildcard {
+		return dns == d.dns
+	}
+	return strings.HasSuffix(dns, "."+d.dns)
+}
+
+// allow adds a given (namespace, dns) pair to the filter. The dns variable is
+// a string that is either a simple domain name, or a wildcard like
+// *.foo.example.com. An error is returned if the dns string could not be
+// parsed.
+func (i *ingressFilter) allow(ns, dns string) error {
+	// If the filter is brand new, initialize it.
+	if i.allowed == nil {
+		i.allowed = make(map[string][]*domain)
+	}
+
+	// Try to parse the name as a wildcard.
+	parts := strings.Split(dns, ".")
+	wildcard := false
+	for i, part := range parts {
+		if i == 0 && part == "*" {
+			wildcard = true
+			continue
+		}
+		// Do some basic validation of the name.
+		if part == "" || strings.Contains(part, "*") {
+			return fmt.Errorf("invalid domain")
+		}
+	}
+	if wildcard {
+		if len(parts) < 2 {
+			return fmt.Errorf("invalid domain")
+		}
+		dns = strings.Join(parts[1:], ".")
+	}
+	i.allowed[ns] = append(i.allowed[ns], &domain{
+		dns:      dns,
+		wildcard: wildcard,
+	})
+	return nil
+}
+
+// domainAllowed returns whether a given domain is allowed to be backed by an
+// ingress within a given namespace.
+func (i *ingressFilter) domainAllowed(ns, domain string) bool {
+	if i.allowed == nil {
+		return true
+	}
+
+	domainFound := false
+	// TODO(q3k): if this becomes too slow, build some inverted index for this.
+	for n, ds := range i.allowed {
+		for _, d := range ds {
+			if !d.match(domain) {
+				continue
+			}
+			// Domain matched, see if allowed in this namespace.
+			domainFound = true
+			if n == ns {
+				return true
+			}
+		}
+		// Otherwise, maybe it's allowed in another namespace.
+	}
+	// No direct match found - if this domain has been at all matched before,
+	// it means that it's a restricted domain and the requested namespace is
+	// not one that's allowed to host it. Refuse.
+	if domainFound {
+		return false
+	}
+	// No direct match found, and this domain is not restricted. Allow.
+	return true
+}
+
+func (i *ingressFilter) admit(req *admission.AdmissionRequest) (*admission.AdmissionResponse, error) {
+	if req.Kind.Group != "networking.k8s.io" || req.Kind.Kind != "Ingress" {
+		return nil, fmt.Errorf("not an ingress")
+	}
+	// TODO(q3k): implement
+	return nil, fmt.Errorf("unimplemented")
+}
diff --git a/cluster/admitomatic/ingress_test.go b/cluster/admitomatic/ingress_test.go
new file mode 100644
index 0000000..91cf2b9
--- /dev/null
+++ b/cluster/admitomatic/ingress_test.go
@@ -0,0 +1,78 @@
+package main
+
+import "testing"
+
+func TestPatterns(t *testing.T) {
+	f := ingressFilter{}
+	// Test that sane filters are allowed.
+	for _, el := range []struct {
+		ns     string
+		domain string
+	}{
+		{"matrix", "matrix.hackerspace.pl"},
+		{"ceph-waw3", "*.hackerspace.pl"},
+		{"personal-q3k", "*.k0.q3k.org"},
+		{"personal-vuko", "shells.vuko.pl"},
+		{"minecraft", "*.k0.q3k.org"},
+	} {
+		err := f.allow(el.ns, el.domain)
+		if err != nil {
+			t.Fatalf("allow(%q, %q): %v", el.ns, el.domain, err)
+		}
+	}
+	// Test that broken patterns are rejected.
+	if err := f.allow("borked", "*.hackerspace.*"); err == nil {
+		t.Fatalf("allow(double star): wanted err, got nil")
+	}
+	if err := f.allow("borked", ""); err == nil {
+		t.Fatalf("allow(empty): wanted err, got nil")
+	}
+	if err := f.allow("borked", "*foo.example.com"); err == nil {
+		t.Fatalf("allow(partial wildcard): wanted err, got nil")
+	}
+}
+
+func TestMatch(t *testing.T) {
+	f := ingressFilter{}
+	// Errors discarded, tested in TestPatterns.
+	f.allow("matrix", "matrix.hackerspace.pl")
+	f.allow("ceph-waw3", "*.hackerspace.pl")
+	f.allow("personal-q3k", "*.k0.q3k.org")
+	f.allow("personal-vuko", "shells.vuko.pl")
+	f.allow("minecraft", "*.k0.q3k.org")
+
+	for _, el := range []struct {
+		ns       string
+		dns      string
+		expected bool
+	}{
+		// Explicitly allowed.
+		{"matrix", "matrix.hackerspace.pl", true},
+		// *.hackerspace.pl is explicitly mentioned in ceph-waw3, so this is
+		// forbidden.
+		{"matrix", "matrix2.hackerspace.pl", false},
+		// Hackers should not be able to take over critical domains.
+		{"personal-hacker", "matrix.hackerspace.pl", false},
+		{"personal-hacker", "totallylegit.hackerspace.pl", false},
+		// q3k can do his thing, even nested.
+		{"personal-q3k", "foo.k0.q3k.org", true},
+		{"personal-q3k", "foo.bar.k0.q3k.org", true},
+		// counterintuitive: only *.k0.q3k.org is constrained, so k0.q3k.org itself
+		// (like any other q3k.org name outside that wildcard) is allowed everywhere.
+		{"personal-hacker", "k0.q3k.org", true},
+		// vuko's shell service is only allowed in his NS.
+		{"personal-vuko", "shells.vuko.pl", true},
+		// counterintuitive: other vuko.pl domains are allowed everywhere, too.
+		// This is because there's no *.vuko.pl wildcard anywhere, so nothing
+		// would block them. Solution: add an explicit *.vuko.pl wildcard to the
+		// namespace, or just don't do a wildcard CNAME redirect to our
+		// ingress.
+		{"personal-hacker", "foobar.vuko.pl", true},
+		// Unknown domains are fine.
+		{"personal-hacker", "www.github.com", true},
+	} {
+		if want, got := el.expected, f.domainAllowed(el.ns, el.dns); got != want {
+			t.Errorf("%q on %q is %v, wanted %v", el.dns, el.ns, got, want)
+		}
+	}
+}
diff --git a/cluster/admitomatic/main.go b/cluster/admitomatic/main.go
new file mode 100644
index 0000000..3178818
--- /dev/null
+++ b/cluster/admitomatic/main.go
@@ -0,0 +1,45 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"net/http"
+	"time"
+
+	"code.hackerspace.pl/hscloud/go/mirko"
+	"github.com/golang/glog"
+)
+
+var (
+	flagListen = "127.0.0.1:8080"
+)
+
+func main() {
+	flag.StringVar(&flagListen, "pub_listen", flagListen, "Address to listen on for HTTP traffic")
+	flag.Parse()
+
+	m := mirko.New()
+	if err := m.Listen(); err != nil {
+		glog.Exitf("Listen(): %v", err)
+	}
+
+	if err := m.Serve(); err != nil {
+		glog.Exitf("Serve(): %v", err)
+	}
+
+	mux := http.NewServeMux()
+	// TODO(q3k): implement admission controller
+	srv := &http.Server{Addr: flagListen, Handler: mux}
+
+	glog.Infof("Listening on %q...", flagListen)
+	go func() {
+		if err := srv.ListenAndServe(); err != nil {
+			glog.Error(err)
+		}
+	}()
+
+	<-m.Done()
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	srv.Shutdown(ctx)
+}
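
The admission logic itself is still a TODO above; one possible shape for the
webhook handler, assuming the ingressFilter type from ingress.go and the
standard AdmissionReview JSON round-trip from k8s.io/api/admission/v1beta1
(the admitHandler name and the fail-closed behaviour are illustrative only):

    package main

    import (
        "encoding/json"
        "net/http"

        admission "k8s.io/api/admission/v1beta1"
    )

    // admitHandler decodes an AdmissionReview from the request body, runs its
    // AdmissionRequest through the given ingressFilter, and writes the same
    // review back with a response attached.
    func admitHandler(f *ingressFilter) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            var review admission.AdmissionReview
            if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
                http.Error(w, "could not decode AdmissionReview", http.StatusBadRequest)
                return
            }
            resp, err := f.admit(review.Request)
            if err != nil || resp == nil {
                // Failing closed on errors is a policy choice made for this sketch.
                resp = &admission.AdmissionResponse{Allowed: false}
            }
            if review.Request != nil {
                resp.UID = review.Request.UID
            }
            review.Response = resp
            // Encoding errors are not recoverable at this point; ignore them.
            json.NewEncoder(w).Encode(&review)
        }
    }

Such a handler could then be registered on the mux above, e.g. with
mux.Handle("/admit", admitHandler(f)) for an ingressFilter f built from
configuration.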
diff --git a/cluster/kube/k0-cockroach.jsonnet b/cluster/kube/k0-cockroach.jsonnet
new file mode 100644
index 0000000..faf0f14
--- /dev/null
+++ b/cluster/kube/k0-cockroach.jsonnet
@@ -0,0 +1,7 @@
+// Only the CockroachDB cluster - nodes, clients etc.
+
+local k0 = (import "k0.libsonnet").k0;
+
+{
+    cockroach: k0.cockroach,
+}
diff --git a/cluster/kube/k0.libsonnet b/cluster/kube/k0.libsonnet
index ad6b735..44f83d0 100644
--- a/cluster/kube/k0.libsonnet
+++ b/cluster/kube/k0.libsonnet
@@ -65,7 +65,7 @@
                     topology: [
                         { name: "bc01n01", node: "bc01n01.hswaw.net" },
                         { name: "bc01n02", node: "bc01n02.hswaw.net" },
-                        { name: "bc01n03", node: "bc01n03.hswaw.net" },
+                        { name: "dcr01s22", node: "dcr01s22.hswaw.net" },
                     ],
                     // Host path on SSD.
                     hostPath: "/var/db/crdb-waw1",
@@ -80,112 +80,19 @@
                 buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
                 sso: k0.cockroach.waw2.Client("sso"),
                 herpDev: k0.cockroach.waw2.Client("herp-dev"),
+                gitea: k0.cockroach.waw2.Client("gitea"),
             },
         },
 
         ceph: {
             // waw1 cluster - dead as of 2019/08/06, data corruption
-            // waw2 cluster: shitty 7200RPM 2.5" HDDs
-            waw2: rook.Cluster(k0.cluster.rook, "ceph-waw2") {
-                spec: {
-                    mon: {
-                        count: 3,
-                        allowMultiplePerNode: false,
-                    },
-                    storage: {
-                        useAllNodes: false,
-                        useAllDevices: false,
-                        config: {
-                            databaseSizeMB: "1024",
-                            journalSizeMB: "1024",
-                        },
-                        nodes: [
-                            {
-                                name: "bc01n01.hswaw.net",
-                                location: "rack=dcr01 chassis=bc01 host=bc01n01",
-                                devices: [ { name: "sda" } ],
-                            },
-                            {
-                                name: "bc01n02.hswaw.net",
-                                location: "rack=dcr01 chassis=bc01 host=bc01n02",
-                                devices: [ { name: "sda" } ],
-                            },
-                            {
-                                name: "bc01n03.hswaw.net",
-                                location: "rack=dcr01 chassis=bc01 host=bc01n03",
-                                devices: [ { name: "sda" } ],
-                            },
-                        ],
-                    },
-                    benji:: {
-                        metadataStorageClass: "waw-hdd-paranoid-2",
-                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
-                        pools: [
-                            "waw-hdd-redundant-2",
-                            "waw-hdd-redundant-2-metadata",
-                            "waw-hdd-paranoid-2",
-                            "waw-hdd-yolo-2",
-                        ],
-                        s3Configuration: {
-                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
-                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
-                            bucketName: "benji-k0-backups",
-                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
-                        },
-                    }
-                },
-            },
-            waw2Pools: {
-                // redundant block storage
-                blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
-                    spec: {
-                        failureDomain: "host",
-                        erasureCoded: {
-                            dataChunks: 2,
-                            codingChunks: 1,
-                        },
-                    },
-                },
-                // paranoid block storage (3 replicas)
-                blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
-                    spec: {
-                        failureDomain: "host",
-                        replicated: {
-                            size: 3,
-                        },
-                    },
-                },
-                // yolo block storage (no replicas!)
-                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
-                    spec: {
-                        failureDomain: "host",
-                        replicated: {
-                            size: 1,
-                        },
-                    },
-                },
-                objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
-                    spec: {
-                        metadataPool: {
-                            failureDomain: "host",
-                            replicated: { size: 3 },
-                        },
-                        dataPool: {
-                            failureDomain: "host",
-                            erasureCoded: {
-                                dataChunks: 2,
-                                codingChunks: 1,
-                            },
-                        },
-                    },
-                },
-            },
+            // waw2 cluster - dead as of 2021/01/22, torn down (horrible M610 RAID controllers are horrible)
 
             // waw3: 6TB SAS 3.5" HDDs
             waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
                 spec: {
                     mon: {
-                        count: 3,
+                        count: 1,
                         allowMultiplePerNode: false,
                     },
                     storage: {
@@ -267,8 +174,8 @@
                     spec: {
                         failureDomain: "osd",
                         erasureCoded: {
-                            dataChunks: 12,
-                            codingChunks: 4,
+                            dataChunks: 2,
+                            codingChunks: 1,
                         },
                     },
                 },
@@ -383,7 +290,6 @@
         # bits they use, whatever those might be.
         # TODO(q3k): fix this?
         unnecessarilyInsecureNamespaces: [
-            policies.AllowNamespaceInsecure("ceph-waw2"),
             policies.AllowNamespaceInsecure("ceph-waw3"),
             policies.AllowNamespaceInsecure("matrix"),
             policies.AllowNamespaceInsecure("registry"),
diff --git a/cluster/kube/lib/cockroachdb.libsonnet b/cluster/kube/lib/cockroachdb.libsonnet
index 8ebad52..9f206f0 100644
--- a/cluster/kube/lib/cockroachdb.libsonnet
+++ b/cluster/kube/lib/cockroachdb.libsonnet
@@ -42,7 +42,7 @@
         local cluster = self,
 
         cfg:: {
-            image: "cockroachdb/cockroach:v19.1.0",
+            image: "cockroachdb/cockroach:v20.2.4",
 
             # Must be unique per cluster.
             portServe: 26257,
diff --git a/cluster/kube/lib/rook.libsonnet b/cluster/kube/lib/rook.libsonnet
index 8f83d2d..c8e38a8 100644
--- a/cluster/kube/lib/rook.libsonnet
+++ b/cluster/kube/lib/rook.libsonnet
@@ -10,7 +10,7 @@
         local env = self,
         local cfg = env.cfg,
         cfg:: {
-            image: "rook/ceph:v1.1.9",
+            image: "rook/ceph:v1.2.7",
             namespace: "rook-ceph-system",
         },
 
@@ -54,6 +54,7 @@
                             },
                             dataDirHostPath: oa.String { pattern: "^/(\\S+)" },
                             skipUpgradeChecks: oa.Boolean,
+                            continueUpgradeAfterChecksEvenIfNotHealthy: oa.Boolean,
                             mon: oa.Dict {
                                 allowMultiplePerNode: oa.Boolean,
                                 count: oa.Integer { minimum: 0, maximum: 9 },
@@ -236,6 +237,15 @@
                     subresources: { status: {} },
                 },
             },
+            cephclients: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephClient") {
+                spec+: {
+                    validation: oa.Validation(oa.Dict {
+                        spec: oa.Dict {
+                            caps: oa.Any,
+                        },
+                    }),
+                },
+            },
         },
 
         sa: {
@@ -307,9 +317,24 @@
                     },
                     {
                         apiGroups: ["policy", "apps"],
-                        resources: ["poddisruptionbudgets", "deployments"],
+                        resources: ["poddisruptionbudgets", "deployments", "replicasets"],
                         verbs: ["*"],
                     },
+                    {
+                        apiGroups: ["healthchecking.openshift.io"],
+                        resources: ["machinedisruptionbudgets"],
+                        verbs: ["get", "list", "watch", "create", "update", "delete"],
+                    },
+                    {
+                        apiGroups: ["machine.openshift.io"],
+                        resources: ["machines"],
+                        verbs: ["get", "list", "watch", "create", "update", "delete"],
+                    },
+                    {
+                        apiGroups: ["storage.k8s.io"],
+                        resources: ["csidrivers"],
+                        verbs: ["create"],
+                    },
                 ],
             },
 
@@ -733,7 +758,17 @@
                         apiGroups: [""],
                         resources: ["configmaps"],
                         verbs: ["get", "list", "watch", "create", "update", "delete"],
-                    }
+                    },
+                ],
+            },
+            osdCluster: kube.ClusterRole(cluster.name("osd-cluster")) {
+                metadata+: cluster.metadata { namespace:: null },
+                rules: [
+                    {
+                        apiGroups: [""],
+                        resources: ["nodes"],
+                        verbs: ["get", "list"],
+                    },
                 ],
             },
             mgr: kube.Role(cluster.name("mgr")) {
@@ -802,13 +837,21 @@
             subjects_: [cluster.sa.mgr],
         },
 
+        osdClusterRB: kube.ClusterRoleBinding(cluster.name("osd-cluster")) {
+            metadata+: {
+                namespace:: null,
+            },
+            roleRef_: cluster.roles.osdCluster,
+            subjects_: [cluster.sa.osd],
+        },
+
+
         cluster: kube._Object("ceph.rook.io/v1", "CephCluster", name) {
             metadata+: cluster.metadata,
             spec: {
                 cephVersion: {
                     # https://github.com/rook/rook/issues/2945#issuecomment-483964014
-                    #image: "ceph/ceph:v13.2.5-20190319",
-                    image: "ceph/ceph:v14.2.2-20190830",
+                    image: "ceph/ceph:v14.2.16",
                     allowUnsupported: true,
                 },
                 dataDirHostPath: if name == "ceph-waw2" then "/var/lib/rook" else "/var/lib/rook-%s" % [name],
diff --git a/cluster/nix/defs-machines.nix b/cluster/nix/defs-machines.nix
index 4c50f67..da9150c 100644
--- a/cluster/nix/defs-machines.nix
+++ b/cluster/nix/defs-machines.nix
@@ -25,19 +25,10 @@
     mgmtIf = "eno1";
     stateVersion = "18.09";
   }
-  rec {
-    name = "bc01n03";
-    threads = 16;
-    fqdn = "${name}.hswaw.net";
-    ipAddr = "185.236.240.37";
-    ipAddrBits = 28;
-    gw = "185.236.240.33";
-    podNet = "10.10.18.0/24";
-    diskBoot = "/dev/disk/by-id/scsi-360024e8078a8fa0023b1787e0605a3e0";
-    fsRoot = "/dev/disk/by-uuid/afc05836-c9b3-4d7e-b0a2-3ebfe2336d4f";
-    mgmtIf = "eno1";
-    stateVersion = "18.09";
-  }
+  # Tombstone - bc01n03 suffered from hardware failure on 2021/01/10.
+  # rec {
+  #   name = "bc01n03";
+  # }
   rec {
     name = "dcr01s22";
     threads = 48;
diff --git a/devtools/gerrit/kube/prod.jsonnet b/devtools/gerrit/kube/prod.jsonnet
index 68f68cc..5f47210 100644
--- a/devtools/gerrit/kube/prod.jsonnet
+++ b/devtools/gerrit/kube/prod.jsonnet
@@ -11,7 +11,7 @@
             domain: "gerrit.hackerspace.pl",
             identity: "7b6244cf-e30b-42c5-ba91-c329ef4e6cf1",
 
-            storageClassName: "waw-hdd-paranoid-2",
+            storageClassName: "waw-hdd-redundant-3",
 
             secureSecret: "gerrit",
         },
diff --git a/devtools/kube/depotview.libsonnet b/devtools/kube/depotview.libsonnet
index 019542c..f82ff84 100644
--- a/devtools/kube/depotview.libsonnet
+++ b/devtools/kube/depotview.libsonnet
@@ -10,7 +10,12 @@
         local depotview = self,
         cfg+: {
             image: cfg.image,
-            container: depotview.GoContainer("main", "/devtools/depotview") {}
+            container: depotview.GoContainer("main", "/devtools/depotview") {
+                resources: {
+                    requests: { cpu: "25m", memory: "256Mi" },
+                    limits: { cpu: "500m", memory: "512Mi" },
+                },
+            }
         },
     }
 }
diff --git a/ops/monitoring/k0.jsonnet b/ops/monitoring/k0.jsonnet
index 62810c5..30dd687 100644
--- a/ops/monitoring/k0.jsonnet
+++ b/ops/monitoring/k0.jsonnet
@@ -9,6 +9,7 @@
         storageClasses+: {
             prometheus: "waw-hdd-redundant-3",
             victoria: "waw-hdd-redundant-3",
+            grafana: "waw-hdd-redundant-3",
         },
     },
 
@@ -25,15 +26,22 @@
     // Global tier - victoria metrics.
     global: global.Global("k0") {
         cfg+: cfg {
+            oauth: {
+                clientId: "22659ba3-c8b2-4855-9553-f78884e0d743",
+                clientSecret: std.split(importstr "secrets/plain/global-oauth-client-secret", "\n")[0],
+            },
             hosts: {
                 globalAPI: "monitoring-global-api.k0.hswaw.net",
+                globalDashboard: "monitoring-global-dashboard.k0.hswaw.net",
             },
             agents: [
                 // Ingestion from k0 cluster tier.
                 { username: k0.cluster.cfg.username, password: std.split(importstr "secrets/plain/global-agent-cluster-k0", "\n")[0], },
-                // Access from q3k's test Grafana.
-                { username: "grafana", password: std.split(importstr "secrets/plain/global-agent-grafana", "\n")[0], },
             ],
+            loopbackGrafanaUser: {
+                username: "grafana",
+                password: std.split(importstr "secrets/plain/global-agent-grafana", "\n")[0],
+            },
         }, 
     },
 }
diff --git a/ops/monitoring/lib/cluster.libsonnet b/ops/monitoring/lib/cluster.libsonnet
index 511d426..00aa792 100644
--- a/ops/monitoring/lib/cluster.libsonnet
+++ b/ops/monitoring/lib/cluster.libsonnet
@@ -60,63 +60,106 @@
                     bearer_token_file: "/var/run/secrets/kubernetes.io/serviceaccount/token",
                 },
 
-                scrape_configs: [
-                    // When scraping node-based metrics (ie. node and cadvisor metrics) we contact
-                    // the metrics endpoints on the kubelet via the API server. This is done by
-                    // relabeling _address__ and __metrics_path__ to point at the k8s API server,
-                    // and at the API server proxy path to reach a node's metrics endpoint.
-                    //
-                    // This approach was lifted from the prometheus examples for Kubernetes, and
-                    // while the benefits outlined there do not matter that much to us (our
-                    // kubelets listen on public addresses, anyway), we still enjoy this approach
-                    // for the fact that we don't have to hardcode the kubelet TLS port.
-                    //
-                    // https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml
-                    //
-                    // When contacting the API server, we hardcode the 'hswaw.net' DNS suffix as
-                    // our API server's TLS certificate only has a CN/SAN for its full FQDN, not
-                    // the .svc.cluster.local shorthand (see //cluster/clustercfg:clustercfg.py).
 
-                    // Scrape Kubernetes node metrics via apiserver. This emites kube_node_* metrics.
-                    kubeScrapeConfig("cluster_node_metrics", "node") {
-                        relabel_configs: [
-                            {
-                                action: "labelmap",
-                                regex: "__meta_kubernetes_node_label_(.+)",
-                            },
-                            {
-                                action: "replace",
-                                target_label: "__address__",
-                                replacement: "kubernetes.default.svc.%s.hswaw.net:443" % [cluster.cfg.name],
-                            },
-                            {
-                                target_label: "__metrics_path__",
-                                source_labels: ["__meta_kubernetes_node_name"],
-                                regex: "(.+)",
-                                replacement: "/api/v1/nodes/${1}/proxy/metrics",
-                            },
-                        ],
+                // When scraping node-based metrics (ie. node and cadvisor metrics) we contact
+                // the metrics endpoints on the kubelet via the API server. This is done by
+                // relabeling __address__ and __metrics_path__ to point at the k8s API server,
+                // and at the API server proxy path to reach a node's metrics endpoint.
+                //
+                // This approach was lifted from the prometheus examples for Kubernetes, and
+                // while the benefits outlined there do not matter that much to us (our
+                // kubelets listen on public addresses, anyway), we still enjoy this approach
+                // for the fact that we don't have to hardcode the kubelet TLS port.
+                //
+                // https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml
+                //
+                // When contacting the API server, we hardcode the 'hswaw.net' DNS suffix as
+                // our API server's TLS certificate only has a CN/SAN for its full FQDN, not
+                // the .svc.cluster.local shorthand (see //cluster/clustercfg:clustercfg.py).
+                local kubeScrapeNodeMetrics = function(name, path) kubeScrapeConfig(name, "node") {
+                    relabel_configs: [
+                        {
+                            action: "labelmap",
+                            regex: "__meta_kubernetes_node_label_(.+)",
+                        },
+                        {
+                            action: "replace",
+                            target_label: "__address__",
+                            replacement: "kubernetes.default.svc.%s.hswaw.net:443" % [cluster.cfg.name],
+                        },
+                        {
+                            target_label: "__metrics_path__",
+                            source_labels: ["__meta_kubernetes_node_name"],
+                            regex: "(.+)",
+                            replacement: "/api/v1/nodes/${1}/proxy" + path,
+                        },
+                    ],
+                },
+
+                // When scraping API server-colocated metrics (ie. metrics from nixos services running alongside
+                // APIserver instances), we contact the metrics endpoints directly over the node's IP addresses
+                // and an external port. The node IP addresses are discovered via Prometheus kubernetes endpoint
+                // discovery which selects all endpoints for the default/kubernetes service. This service is
+                // backed by apiserver instances on public IP addresses. We can then rewrite the received
+                // port to that of the service we're interested in, in order to reach that service.
+                local kubeScrapeAPIServerColocated = function(name, port) kubeScrapeConfig(name, "endpoints") {
+                    relabel_configs: [
+                        // Select only endpoints that back the default/kubernetes service. These are all
+                        // public IP addresses of nodes that run the API server.
+                        {
+                            action: "keep",
+                            regex: "default;kubernetes;https",
+                            source_labels: [
+                                "__meta_kubernetes_namespace",
+                                "__meta_kubernetes_service_name",
+                                "__meta_kubernetes_endpoint_port_name",
+                            ],
+                        },
+                    ] + (if port == 4001 then [] else [
+                        // Replace endpoint port with requested port, if the requested port is not the apiserver's
+                        // port 4001, which is the one returned by default for these endpoints.
+                        {
+                            action: "replace",
+                            regex: "([^:]+):.+",
+                            replacement: "$1:%d" % [port],
+                            source_labels: [
+                                "__address__",
+                            ],
+                            target_label: "__address__",
+                        },
+                    ]),
+                    // We disable server-side TLS certificate verification.
+                    // Unfortunately, all apiserver-colocated services run with TLS certificates that do not have
+                    // the right IP address SAN. Unfortunately, we can't override the TLS ServerName for a scrape
+                    // target [1], so the only two choices we are left with are:
+                    //   1) re-emit relevant certificates with IP address SANs that allow for access by IP.
+                    //   2) disable TLS verification.
+                    // We choose 2), knowing that if someone manages to hijack a target IP address they can end up
+                    // stealing our bearer token and impersonating the service account with which Prometheus is
+                    // running. In the long term, we hope for [1] to be resolved.
+                    //
+                    // TODO(q3k): revisit this once [1] gets fixed.
+                    // [1] - https://github.com/prometheus/prometheus/issues/4827
+                    tls_config: {
+                        insecure_skip_verify: true,
                     },
+                },
+
+                scrape_configs: [
+                    /// Scrape per-node metrics, proxied via the APIServer.
+                    // Scrape Kubernetes node metrics via apiserver. This emits kube_node_* metrics.
+                    kubeScrapeNodeMetrics("cluster_node_metrics", "/metrics"),
                     // Scrape Kubernetes node cadvisor metrics via apiserver. This emits container_* metrics.
-                    kubeScrapeConfig("cluster_cadvisor_metrics", "node") {
-                        relabel_configs: [
-                            {
-                                action: "labelmap",
-                                regex: "__meta_kubernetes_node_label_(.+)",
-                            },
-                            {
-                                action: "replace",
-                                target_label: "__address__",
-                                replacement: "kubernetes.default.svc.%s.hswaw.net:443" % [cluster.cfg.name],
-                            },
-                            {
-                                target_label: "__metrics_path__",
-                                source_labels: ["__meta_kubernetes_node_name"],
-                                regex: "(.+)",
-                                replacement: "/api/v1/nodes/${1}/proxy/metrics/cadvisor",
-                            },
-                        ],
-                    },
+                    kubeScrapeNodeMetrics("cluster_cadvisor_metrics", "/metrics/cadvisor"),
+
+                    /// Scrape apiserver-colocated ('master node') metrics, over nodes' public IP addresses.
+                    /// (currently all nodes are 'master' nodes)
+                    // Scrape Kubernetes apiserver metrics.
+                    kubeScrapeAPIServerColocated("cluster_apiserver_metrics", 4001),
+                    // Scrape Kubernetes controller-manager metrics.
+                    kubeScrapeAPIServerColocated("cluster_controllermanager_metrics", 4003),
+                    // Scrape Kubernetes scheduler metrics.
+                    kubeScrapeAPIServerColocated("cluster_scheduler_metrics", 4005),
                 ],
 
                 remote_write: [
@@ -152,6 +195,7 @@
                     { nonResourceURLs: ["/metrics"], verbs: ["get"], },
                     // Allow to access node details for discovery.
                     { apiGroups: [""], resources: ["nodes"], verbs: ["list", "watch", "get"], },
+                    { apiGroups: [""], resources: ["endpoints", "services", "pods"], verbs: ["list", "watch", "get"], },
                     // Allow to proxy to bare node HTTP to access per-node metrics endpoints. 
                     { apiGroups: [""], resources: ["nodes/proxy"], verbs: ["get"], },
                 ],
@@ -183,11 +227,11 @@
                                     ],
                                     resources: {
                                         requests: {
-                                            memory: "256Mi",
+                                            memory: "3Gi",
                                             cpu: "100m",
                                         },
                                         limits: {
-                                            memory: "1Gi",
+                                            memory: "3Gi",
                                             cpu: "1",
                                         },
                                     },
diff --git a/ops/monitoring/lib/global.libsonnet b/ops/monitoring/lib/global.libsonnet
index dbdbebb..6ec9249 100644
--- a/ops/monitoring/lib/global.libsonnet
+++ b/ops/monitoring/lib/global.libsonnet
@@ -18,11 +18,13 @@
             images: {
                 victoria: "victoriametrics/victoria-metrics:v1.40.0",
                 vmauth: "victoriametrics/vmauth:v1.40.0",
+                grafana: "grafana/grafana:7.2.1",
             },
 
             hosts: {
                 // DNS hostname that this global tier will use. Ingress will run under it.
                 globalAPI: error "hosts.globalAPI must be set",
+                globalDashboard: error "hosts.globalDashboard must be set",
             },
 
             storageClasses: {
@@ -30,6 +32,11 @@
                 victoria: error "storageClasses.victoria must be set",
             },
 
+            oauth: {
+                clientId: error "oauth.clientId must be set",
+                clientSecret: error "oauth.clientSecret must be set",
+            },
+
             // A list of agents that will push metrics to this instance.
             // List of:
             // {
@@ -41,10 +48,14 @@
 
         // Generated URLs that agents should use to ship metrics over. Both require HTTP basic
         // auth, configured via cfg.agents.
-        // The internal URL should be used for agents colocated in the same Kubernetes cluster.
+        // The internal URL that should be used for agents colocated in the same Kubernetes cluster.
         internalIngestURL:: "http://%s/api/v1/write" % [global.victoria.serviceAPI.host_colon_port],
-        // The glboal URL should be used for agents sending data over the internet.
+        // The internal URL that should be used for readers colocated in the same Kubernetes cluster.
+        internalReadURL:: "http://%s/" % [global.victoria.serviceAPI.host_colon_port],
+        // The global URL that should be used for agents sending data over the internet.
         globalIngestURL:: "https://%s/api/v1/write" % [cfg.hosts.globalAPI],
+        // The global URL that should be used for readers over the internet.
+        globalReadURL:: "https://%s" % [cfg.hosts.globalAPI],
 
         namespace: kube.Namespace(cfg.namespace),
         local ns = global.namespace,
@@ -73,7 +84,7 @@
                                 password: a.password,
                                 url_prefix: "http://localhost:8428",
                             }
-                            for a in cfg.agents
+                            for a in (cfg.agents + [cfg.loopbackGrafanaUser])
                         ],
                     }) + "\n")
                 },
@@ -145,5 +156,150 @@
                 },
             },
         },
+
+        grafana: {
+            local grafana = self,
+
+            // grafana.ini, serialized to secret.
+            ini:: {
+                sections: {
+                    "auth": {
+                        "disable_login_form": true,
+                        "oauth_auto_login": true,
+                    },
+                    "security": {
+                        # We do not disable basic auth, as we want to use built-in
+                        # users as API users (e.g. for config reload), but we do want
+                        # to disable the default admin:admin user.
+                        "disable_initial_admin_creation": true,
+                    },
+                    "auth.generic_oauth": {
+                        enabled: true,
+                        client_id: cfg.oauth.clientId,
+                        client_secret: cfg.oauth.clientSecret,
+                        auth_url: "https://sso.hackerspace.pl/oauth/authorize",
+                        token_url: "https://sso.hackerspace.pl/oauth/token",
+                        api_url: "https://sso.hackerspace.pl/api/1/userinfo",
+                        scopes: "openid",
+                        email_attribute_path: "email",
+                        allow_sign_up: true,
+                        role_attribute_path: "contains(groups, 'grafana-admin')",
+                    },
+                    "server": {
+                        domain: cfg.hosts.globalDashboard,
+                        root_url: "https://%s/" % [ cfg.hosts.globalDashboard ],
+                    },
+                },
+            },
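+            // When rendered with std.manifestIni (see the grafana-config Secret below),
+            // the structure above becomes a plain grafana.ini, roughly:
+            //
+            //   [auth]
+            //   disable_login_form = true
+            //   oauth_auto_login = true
+            //
+            //   [auth.generic_oauth]
+            //   client_id = <cfg.oauth.clientId>
+            //   ...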
+
+            datasources:: {
+                apiVersion: 1,
+                datasources: [
+                    {
+                        name: "victoria-global",
+                        type: "prometheus",
+                        uid: "victoria-global",
+                        isDefault: true,
+                        url: global.internalReadURL,
+                        basicAuth: true,
+                        basicAuthUser: cfg.loopbackGrafanaUser.username,
+                        secureJsonData: {
+                            basicAuthPassword: cfg.loopbackGrafanaUser.password,
+                        },
+                    },
+                ],
+            },
+
+            config: ns.Contain(kube.Secret("grafana-config")) {
+                data+: {
+                    "grafana.ini": std.base64(std.manifestIni(grafana.ini)),
+                    "datasources.yaml": std.base64(std.manifestYamlDoc(grafana.datasources)),
+                },
+            },
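+            // Both files from this Secret are mounted into /etc/hscloud-config in the
+            // Grafana container: grafana.ini is picked up via GF_PATHS_CONFIG, and
+            // datasources.yaml lands under GF_PATHS_PROVISIONING/datasources/.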
+
+            pvc: ns.Contain(kube.PersistentVolumeClaim("grafana-data")) {
+                spec+: {
+                    storageClassName: cfg.storageClasses.grafana,
+                    accessModes: ["ReadWriteOnce"],
+                    resources: {
+                        requests: {
+                            storage: "8Gi",
+                        },
+                    },
+                },
+            },
+
+            deploy: ns.Contain(kube.Deployment("grafana")) {
+                spec+: {
+                    template+: {
+                        spec+: {
+                            containers_: {
+                                default: kube.Container("default") {
+                                    image: cfg.images.grafana,
+                                    ports_: {
+                                        public: { containerPort: 3000 },
+                                    },
+                                    env_: {
+                                        GF_PATHS_CONFIG: "/etc/hscloud-config/grafana.ini",
+                                        GF_PATHS_PROVISIONING: "/etc/hscloud-config/provisioning",
+                                        GF_PATHS_DATA: "/var/lib/grafana",
+                                    },
+                                    volumeMounts_: {
+                                        config: { mountPath: "/etc/hscloud-config", },
+                                        data: { mountPath: "/var/lib/grafana", },
+                                    },
+                                    resources: {
+                                        requests: { cpu: "100m", memory: "256M", },
+                                        limits: { cpu: "200m", memory: "512M", },
+                                    },
+                                },
+                            },
+                            volumes_: {
+                                data: kube.PersistentVolumeClaimVolume(grafana.pvc),
+                                config: kube.SecretVolume(grafana.config) {
+                                    secret+: {
+                                        items: [
+                                            { key: "grafana.ini", path: "grafana.ini", },
+                                            { key: "datasources.yaml", path: "provisioning/datasources/datasources.yaml", },
+                                        ],
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+
+            service: ns.Contain(kube.Service("grafana-public")) {
+                target_pod: grafana.deploy.spec.template,
+                spec+: {
+                    ports: [
+                        { name: "public", port: 3000, targetPort: 3000, protocol: "TCP" },
+                    ],
+                },
+            },
+
+            ingress: ns.Contain(kube.Ingress("grafana-public")) {
+                metadata+: {
+                    annotations+: {
+                        "kubernetes.io/tls-acme": "true",
+                        "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+                    },
+                },
+                spec+: {
+                    tls: [
+                        { hosts: [cfg.hosts.globalDashboard], secretName: "ingress-grafana-tls" },
+                    ],
+                    rules: [
+                        {
+                            host: cfg.hosts.globalDashboard,
+                            http: {
+                                paths: [ { path: "/", backend: { serviceName: grafana.service.metadata.name, servicePort: 3000 } }, ],
+                            },
+                        }
+                    ],
+                },
+            },
+        },
     }
 }
diff --git a/ops/monitoring/secrets/cipher/global-oauth-client-secret b/ops/monitoring/secrets/cipher/global-oauth-client-secret
new file mode 100644
index 0000000..77b4e89
--- /dev/null
+++ b/ops/monitoring/secrets/cipher/global-oauth-client-secret
@@ -0,0 +1,40 @@
+-----BEGIN PGP MESSAGE-----
+
+hQEMAzhuiT4RC8VbAQf9Ei3B4VGp5X1sBBvdpD0P1gbZcOMuQrChLKf4WFTkJ31V
+7iK88YzJXM1VN0/GdTS4xk30D9Bh6nkbyWqSQ6e5mI6rU06DHjEF4nH/rCVNNImx
+2lsAfHkvyBYV2rzMD+v7o/WWcR0RzemtopJvJXahM39Dd4WKQEqilcvwFM3p/zAG
+p9svNEpangRCw4viNeP8RzBIHl6d73gcLwYtlmmj/URR4hVh0QByvJE+8tZJaelg
+D2ILnnv30If51H6iRjUSdQYiScPyAc0Ooe7nLNyiZJHe2unv1wpFK/ppW5nTLc6J
+Jl3ku5k5Fza5GLImxT+r3LFaGCUZwI2Ilh+aixOd8YUBDANcG2tp6fXqvgEIAM4s
+Vty4caVhY8wIK4shv+2N8VXxaa8AHBMycfsAdrMG7ohrVLBJcNCs2CfYDRcLLxXq
+y/PU53hffCgg19g1np+8rsYis5JXS8Uqri/54T/S4cMid1UaCq2BIs+1A/9j780G
+4GGArAFDS451t5QjWzXl2W0ZVTeTSVC3s93psht10cZt8APAxlefkoPwSbb2kYz5
+CCOmUGGLwHB87xBl0jRZ55A2Qe77637YEvbRBr79OhztSIJ1WJjkNFLqOVbCDcR0
+IH9kVES2fN/4KCI772P+Rmh330B13UHk9xnu1xEJsi57HjCof+zwGvmEfNrKtS9d
+knHAlDPycEVnQMDVNUOFAgwDodoT8VqRl4UBD/902MbY7Psg+wm7s1ybsclWRA1q
+lJToPhB1NeDhdh/9l51kWT5JvUjS6jCvoGHyJvnxXR6Ot3i+8mjEiHZf6amu5gvq
+skvzQwt+XwtIOaUxJChfRhk+GoyT6EpSHXYDNWKfWPG4gUaM42o8S7BObyjGjwXE
+kTf3bvw50YNqJo7DmSJ1yS/sY4/J9wWT0jz0jSc9PjpAI9qw8vbWSrfbMa7EWos3
+ENyIDl0GlF5S13J5GtyOCQLh9TsHi+zCe/jhmu4uhSeHxyuGru+UvNE1ME0XIUAS
+fUJ5dLIfdLH+ILBRBZ+G0XRT/3XkWlyhuRZf7ALU3tG1wXRV1evc0zv7kEcz2hQm
+gUPXkZzcFIG1cO3r9FhBvAM86p+UHSdsXdRXSVWsH12QFDjv8ZollPzO3ZztQI6a
+R6E3WQ1nyiFjVTHKrCus89UDqBtAiYujfuwLcDYP9wMBW7JpETd1qurccSnL3duh
+3jkKGHeskQPkB9UrT1P66zUjT/gAFDy5/sfVxoO5y+jPAJS9owYrONAoQtTL0HcA
+4ixmaDb3ZzBt1LAfDDlGSjt4agQVfVLeGPF/zrFS4GrqzPDREyfTAsYdokA+y0LM
+XI6mSsHd01HPGpRbsE5ABOO88sqRnuD8KBxWpgaG+Z8zn1uuf7n1L2JRWpFcd8h/
+C09qbhK0+9C80HBZqoUCDAPiA8lOXOuz7wEP/R81sepe2UgcwMuBQmrn30y+kN0i
+93zhYDVJFYUF07b7ociu2OnGFCnFF3ZQNao3ZvSuKoCKkQvcf7mxHA9xkFjiwGAi
+elhHDQcUt8IriosGNhSArujEZ1kc1Nk9MWQKRSLhVXNtdTrn4e15OPXO+AR7CszW
+Kz9Mwo9BNPzu7Zwq1JfUOExpDPT6fPVHZNnzg3KU4s2HRcrLD9JEE2i2/VxbmszH
+aTy+/1kF8hHSfRV0Q7NcjRAbztWrd47HqsWmmWzjcjnSKNV1n7P5AcB06Yjdf0+0
+xEuehwseJs6OhL3MxCsQoFuM9xhm7W/rfGQe+JvJc9Hxb60AgoMGJ1GSHz8xhjyx
+EOujnIabcUeOm0h0twEi98+OJTlKss1YPdcKMPCit7SJZX8k6t2deOp8t0x9R5hH
+v30DRSVgNeqDkBK0dEouR3xLzNz8yardFqVpM88w4D/npUQ5RB6+1af5LFYrm4zG
+kEit4bYdJVpfgt0ZRFoyWaAiAt07ARFmoWeQRRDrpbX+ddKAFmvHl0oyRy/QF2xx
+P6YT8UyEDNraXchAf4cBjuCuiRyqVqaPAOLp3rKmEBBiXddRX9fsq24/9X5QY4o8
+Kemf0fbH9ndsL4vPrJI/j7nvbgq2dpFuHlnFgE5EUEFoPcDI1GI6hUr5UUffnjzM
+aOPp1vxrxhwQy0IG0nQBTdekgVaPiqP+AxVfQbSjz6zNotSJMPAvbx1aNWmxesXO
+eZYeMaSVRnSHub97eb6hn167olrcrAzPxFssb7iTEQh2Xs6PeWbe0FsTz0Fim/yY
+iIw5GlFw15/afo86hbDgrK0j2ZiafKvZYC2EtKoYGzAoxA==
+=8iTI
+-----END PGP MESSAGE-----
diff --git a/ops/sso/prod.jsonnet b/ops/sso/prod.jsonnet
new file mode 100644
index 0000000..07f152e
--- /dev/null
+++ b/ops/sso/prod.jsonnet
@@ -0,0 +1,15 @@
+local sso = import "sso.libsonnet";
+
+{
+    sso: sso {
+        cfg+: {
+            domain: "sso.hackerspace.pl",
+            database+: {
+                host: "public.crdb-waw1.svc.cluster.local",
+                name: "sso",
+                username: "sso",
+                tlsSecret: "client-sso-certificate",
+            },
+        },
+    },
+}
diff --git a/ops/sso/sso.libsonnet b/ops/sso/sso.libsonnet
new file mode 100644
index 0000000..3ac3002
--- /dev/null
+++ b/ops/sso/sso.libsonnet
@@ -0,0 +1,126 @@
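+# The "sso" Secret referenced by the deployment below (keys: secret_key,
+# ldap_bind_password) is created manually, e.g.: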
+#    kubectl create secret generic sso --from-literal=secret_key=$(pwgen 24 1) --from-literal=ldap_bind_password=...
+
+local kube = import "../../kube/kube.libsonnet";
+
+{
+    local app = self,
+    local cfg = app.cfg,
+
+    cfg:: {
+        namespace: "sso",
+        image: "registry.k0.hswaw.net/informatic/sso-v2@sha256:a44055a4f1d2a4e0708838b571f3a3c018f3b97adfea71ae0cf1df98246bf6cf",
+        domain: error "domain must be set",
+        database: {
+            host: error "database.host must be set",
+            name: error "database.name must be set",
+            username: error "database.username must be set",
+            port: 26257,
+            tlsSecret: error "database.tlsSecret must be set",
+        },
+    },
+
+    ns: kube.Namespace(app.cfg.namespace),
+
+    deployment: app.ns.Contain(kube.Deployment("sso")) {
+        spec+: {
+            replicas: 1,
+            template+: {
+                spec+: {
+                    volumes_: {
+                        crdb: {
+                            secret: {
+                                secretName: cfg.database.tlsSecret,
+                                defaultMode: std.parseOctal("0600"),
+                            },
+                        },
+                        tlscopy: kube.EmptyDirVolume(),  # see initContainers_.secretCopy
+                    },
+                    securityContext: {
+                        runAsUser: 100,
+                        runAsGroup: 101,
+                        fsGroup: 101,
+                    },
+                    initContainers_: {
+                        # psycopg2 / libpq wants its TLS secret keys to be readable
+                        # only by the running process. As k8s exposes secrets/configmaps
+                        # as symlinks, libpq gets confused and refuses to start unless
+                        # we dereference them into a local copy with proper permissions.
+                        secretCopy: kube.Container("secret-copy") {
+                            image: cfg.image,
+                            command: ["sh", "-c", "cp -fv /tls-orig/* /tls && chmod 0400 /tls/*"],
+                            volumeMounts_: {
+                                crdb: { mountPath: "/tls-orig" },
+                                tlscopy: { mountPath: "/tls" },
+                            },
+                        },
+                    },
+                    containers_: {
+                        web: kube.Container("sso") {
+                            image: cfg.image,
+                            ports_: {
+                                http: { containerPort: 5000 },
+                            },
+                            env_: {
+                                DATABASE_URI: "cockroachdb://%s@%s:%d/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s" % [
+                                    cfg.database.username,
+                                    cfg.database.host,
+                                    cfg.database.port,
+                                    cfg.database.name,
+                                    "/tls/ca.crt",
+                                    "/tls/tls.crt",
+                                    "/tls/tls.key",
+                                ],
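+                                // With the values from prod.jsonnet this expands to roughly:
+                                // cockroachdb://sso@public.crdb-waw1.svc.cluster.local:26257/sso?sslmode=require&sslrootcert=/tls/ca.crt&sslcert=/tls/tls.crt&sslkey=/tls/tls.key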
+
+                                LDAP_BIND_PASSWORD: { secretKeyRef: { name: "sso", key: "ldap_bind_password" } },
+                                SECRET_KEY: { secretKeyRef: { name: "sso", key: "secret_key" } },
+                                LOGGING_LEVEL: "DEBUG",
+                            },
+                            volumeMounts_: {
+                                tlscopy: { mountPath: "/tls" },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+
+    svc: app.ns.Contain(kube.Service("sso")) {
+        target_pod:: app.deployment.spec.template,
+        spec+: {
+            ports: [
+                { name: "http", port: 5000, targetPort: 5000, protocol: "TCP" },
+            ],
+            type: "ClusterIP",
+        },
+    },
+
+    ingress: app.ns.Contain(kube.Ingress("sso")) {
+        metadata+: {
+            annotations+: {
+                "kubernetes.io/tls-acme": "true",
+                "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+                "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+            },
+        },
+        spec+: {
+            tls: [
+                {
+                    hosts: [cfg.domain],
+                    secretName: "sso-tls",
+                },
+            ],
+            rules: [
+                {
+                    host: cfg.domain,
+                    http: {
+                        paths: [
+                            { path: "/", backend: app.svc.name_port },
+                        ]
+                    },
+                }
+            ],
+        },
+    },
+}