Merge changes I84873bc3,I1eedb190
* changes:
ops/monitoring: deploy grafana
ops/monitoring: scrape apiserver, scheduler, and controller-manager
diff --git a/WORKSPACE b/WORKSPACE
index 2c30246..65298b1 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -157,11 +157,11 @@
)
container_pull(
- name = "gerrit-3.0.8",
+ name = "gerrit-3.3.0",
registry = "index.docker.io",
repository = "gerritcodereview/gerrit",
- tag = "3.0.8-ubuntu18",
- digest = "sha256:8f58236129e6547d92502a2e9d8f40129f45f15007beaeafb59fed4faffddb3e",
+ tag = "3.3.0-ubuntu20",
+ digest = "sha256:c7c43db7be19394a9a5f28a016d0063be2713144dc4bb815ceb698c895bc88d1",
)
# third_party/factorio
@@ -174,85 +174,16 @@
git_repository(
name = "com_googlesource_gerrit_bazlets",
remote = "https://gerrit.googlesource.com/bazlets",
- commit = "1d381f01c853e2c02ae35430a8e294e485635d62",
- shallow_since = "1559431096 -0400",
+ commit = "a511f3c90129d7de7ae67c0637001162980c08d5",
+ shallow_since = "1606931369 -0600",
)
load("@com_googlesource_gerrit_bazlets//:gerrit_api.bzl", "gerrit_api")
gerrit_api()
-load("@com_googlesource_gerrit_bazlets//tools:maven_jar.bzl", gerrit_maven_jar = "maven_jar", "GERRIT")
-
-PROLOG_VERS = "1.4.3"
-
-JACKSON_VER = "2.9.7"
-
-gerrit_maven_jar(
- name = "scribe",
- artifact = "org.scribe:scribe:1.3.7",
- sha1 = "583921bed46635d9f529ef5f14f7c9e83367bc6e",
-)
-
-gerrit_maven_jar(
- name = "commons-codec",
- artifact = "commons-codec:commons-codec:1.4",
- sha1 = "4216af16d38465bbab0f3dff8efa14204f7a399a",
-)
-
-gerrit_maven_jar(
- name = "jackson-core",
- artifact = "com.fasterxml.jackson.core:jackson-core:" + JACKSON_VER,
- sha1 = "4b7f0e0dc527fab032e9800ed231080fdc3ac015",
-)
-
-gerrit_maven_jar(
- name = "jackson-databind",
- artifact = "com.fasterxml.jackson.core:jackson-databind:" + JACKSON_VER,
- sha1 = "e6faad47abd3179666e89068485a1b88a195ceb7",
-)
-
-gerrit_maven_jar(
- name = "jackson-annotations",
- artifact = "com.fasterxml.jackson.core:jackson-annotations:" + JACKSON_VER,
- sha1 = "4b838e5c4fc17ac02f3293e9a558bb781a51c46d",
-)
-
-gerrit_maven_jar(
- name = "jackson-dataformat-yaml",
- artifact = "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:" + JACKSON_VER,
- sha1 = "a428edc4bb34a2da98a50eb759c26941d4e85960",
-)
-
-gerrit_maven_jar(
- name = "snakeyaml",
- artifact = "org.yaml:snakeyaml:1.23",
- sha1 = "ec62d74fe50689c28c0ff5b35d3aebcaa8b5be68",
-)
-
-gerrit_maven_jar(
- name = "prolog-runtime",
- artifact = "com.googlecode.prolog-cafe:prolog-runtime:" + PROLOG_VERS,
- attach_source = False,
- repository = GERRIT,
- sha1 = "d5206556cbc76ffeab21313ffc47b586a1efbcbb",
-)
-
-gerrit_maven_jar(
- name = "prolog-compiler",
- artifact = "com.googlecode.prolog-cafe:prolog-compiler:" + PROLOG_VERS,
- attach_source = False,
- repository = GERRIT,
- sha1 = "f37032cf1dec3e064427745bc59da5a12757a3b2",
-)
-
-gerrit_maven_jar(
- name = "prolog-io",
- artifact = "com.googlecode.prolog-cafe:prolog-io:" + PROLOG_VERS,
- attach_source = False,
- repository = GERRIT,
- sha1 = "d02b2640b26f64036b6ba2b45e4acc79281cea17",
-)
+load("//devtools/gerrit/gerrit-oauth-provider:external_plugin_deps.bzl", gerrit_oauth_deps="external_plugin_deps")
+gerrit_oauth_deps(omit_commons_codec=False)
# minecraft spigot/bukkit deps
# this uses rules_jvm_external vs gerrit's maven_jar because we need SNAPSHOT support
@@ -312,9 +243,11 @@
git_repository(
name = "com_googlesource_gerrit_plugin_owners",
remote = "https://gerrit.googlesource.com/plugins/owners/",
- commit = "5e691e87b8c00a04d261a8dd313f4d16c54797e8",
- shallow_since = "1559729722 +0900",
+ commit = "17817c9e319073c03513f9d5177b6142b8fd567b",
+ shallow_since = "1593642470 +0200",
)
+load("@com_googlesource_gerrit_plugin_owners//:external_plugin_deps_standalone.bzl", gerrit_owners_deps="external_plugin_deps_standalone")
+gerrit_owners_deps()
# Go image repos for Docker
diff --git a/app/matrix/lib/matrix.libsonnet b/app/matrix/lib/matrix.libsonnet
index 5f9ecd2..358b0c9 100644
--- a/app/matrix/lib/matrix.libsonnet
+++ b/app/matrix/lib/matrix.libsonnet
@@ -23,7 +23,7 @@
#
# For appservice-irc instances, you can use this oneliner magic to get the
# registration YAML from logs.
-# kubectl -n matrix create secret generic appservice-irc-freenode-registration --from-file=registration.yaml=<(kubectl logs -n matrix $(kubectl get pods -n matrix --selector=job-name=appservice-irc-freenode-bootstrap --output=jsonpath='{.items[*].metadata.name}') | tail -n +4 | sed -r 's/(.*aliases:.*)/ group_id: "+freenode:hackerspace.pl"\n\1/')
+# kubectl -n matrix create secret generic appservice-irc-freenode-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-irc-freenode-bootstrap | tail -n +4 | sed -r 's/(.*aliases:.*)/ group_id: "+freenode:hackerspace.pl"\n\1/')
#
# For appservice-telegram instances, you can use this oneliner magic:
# kubectl -n matrix create secret generic appservice-telegram-prod-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-telegram-prod-bootstrap | grep -A 100 SNIPSNIP | grep -v SNIPSNIP)
@@ -44,11 +44,12 @@
images: {
synapse: "matrixdotorg/synapse:v1.19.2",
- riot: "vectorim/riot-web:v1.7.7",
+ riot: "vectorim/riot-web:v1.7.16",
casProxy: "registry.k0.hswaw.net/q3k/oauth2-cas-proxy:0.1.4",
appserviceIRC: "matrixdotorg/matrix-appservice-irc:release-0.17.1",
# That's v0.8.2 - we just don't trust that host to not re-tag images.
appserviceTelegram: "dock.mau.dev/tulir/mautrix-telegram@sha256:9e68eaa80c9e4a75d9a09ec92dc4898b12d48390e01efa4de40ce882a6f7e330",
+ wellKnown: "registry.k0.hswaw.net/q3k/wellknown:1611960794-adbf560851a46ad0e58b42f0daad7ef19535687c",
},
# Central Authentication Scheme, a single-sign-on system. Note: this flow is now called 'SSO' in Matrix, we keep this name for legacy reasons.
@@ -66,6 +67,8 @@
userinfoURL: error "cas.oauth2.userinfoURL must be set",
},
},
+
+ wellKnown: false,
},
metadata(component):: {
@@ -351,6 +354,37 @@
target_pod:: app.riotDeployment.spec.template,
},
+ wellKnown: if cfg.wellKnown then {
+ deployment: kube.Deployment("wellknown") {
+ metadata+: app.metadata("wellknown"),
+ spec+: {
+ replicas: 1,
+ template+: {
+ spec+: {
+ containers_: {
+ web: kube.Container("wellknown") {
+ image: cfg.images.wellKnown,
+ ports_: {
+ http: { containerPort: 8080 },
+ },
+ command: ["/app/matrix/wellknown"],
+ args: ["-hspki_disable", "-domain", cfg.webDomain],
+ },
+ },
+ securityContext: {
+ runAsUser: 101,
+ runAsGroup: 101,
+ },
+ },
+ },
+ },
+ },
+ svc: kube.Service("wellknown") {
+ metadata+: app.metadata("wellknown"),
+ target_pod:: app.wellKnown.deployment.spec.template,
+ },
+ } else {},
+
// Any appservice you add here will require an appservice-X-registration
// secret containing a registration.yaml file. Adding something to this
// dictionary will cause Synapse to not start until that secret is
@@ -383,6 +417,8 @@
{ path: "/_matrix", backend: app.synapseSvc.name_port },
] + (if cfg.cas.enable then [
{ path: "/_cas", backend: app.casSvc.name_port },
+ ] else []) + (if cfg.wellKnown then [
+ { path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
] else [])
},
}
diff --git a/app/matrix/matrix.hackerspace.pl.jsonnet b/app/matrix/matrix.hackerspace.pl.jsonnet
index d71dd84..6488fbb 100644
--- a/app/matrix/matrix.hackerspace.pl.jsonnet
+++ b/app/matrix/matrix.hackerspace.pl.jsonnet
@@ -26,8 +26,7 @@
"irc-freenode": irc.AppServiceIrc("freenode") {
cfg+: {
image: cfg.images.appserviceIRC,
- // TODO(q3k): move this appservice to waw-hdd-redundant-3
- storageClassName: "waw-hdd-paranoid-2",
+ storageClassName: "waw-hdd-redundant-3",
metadata: app.metadata("appservice-irc-freenode"),
// TODO(q3k): add labels to blessed nodes
nodeSelector: {
diff --git a/app/matrix/wellknown/BUILD b/app/matrix/wellknown/BUILD
index 1cf4138..b44ac52 100644
--- a/app/matrix/wellknown/BUILD
+++ b/app/matrix/wellknown/BUILD
@@ -1,3 +1,4 @@
+load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
go_library(
@@ -23,3 +24,28 @@
embed = [":go_default_library"],
deps = ["@com_github_go_test_deep//:go_default_library"],
)
+
+container_layer(
+ name = "layer_bin",
+ files = [
+ ":wellknown",
+ ],
+ directory = "/app/matrix/",
+)
+
+container_image(
+ name = "runtime",
+ base = "@prodimage-bionic//image",
+ layers = [
+ ":layer_bin",
+ ],
+)
+
+container_push(
+ name = "push",
+ image = ":runtime",
+ format = "Docker",
+ registry = "registry.k0.hswaw.net",
+ repository = "q3k/wellknown",
+ tag = "{BUILD_TIMESTAMP}-{STABLE_GIT_COMMIT}",
+)
diff --git a/app/matrix/wellknown/README.me b/app/matrix/wellknown/README.md
similarity index 100%
rename from app/matrix/wellknown/README.me
rename to app/matrix/wellknown/README.md
diff --git a/bgpwtf/invoice/BUILD.bazel b/bgpwtf/invoice/BUILD.bazel
index 900f0b3..950474e 100644
--- a/bgpwtf/invoice/BUILD.bazel
+++ b/bgpwtf/invoice/BUILD.bazel
@@ -1,4 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
go_library(
name = "go_default_library",
@@ -31,3 +31,9 @@
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)
+
+go_test(
+ name = "go_default_test",
+ srcs = ["calc_test.go"],
+ embed = [":go_default_library"],
+)
diff --git a/bgpwtf/invoice/calc.go b/bgpwtf/invoice/calc.go
index 9c411da..72933d3 100644
--- a/bgpwtf/invoice/calc.go
+++ b/bgpwtf/invoice/calc.go
@@ -1,17 +1,23 @@
package main
import (
+ "sort"
"time"
pb "code.hackerspace.pl/hscloud/bgpwtf/invoice/proto"
)
+// calculateInvoiceData applies all business logic to populate an Invoice's
+// denormalized fields from its InvoiceData.
func calculateInvoiceData(p *pb.Invoice) {
+ // Populate default unit.
+ // TODO(q3k): this really should be done on invoice submit instead.
p.Unit = p.Data.Unit
if p.Unit == "" {
p.Unit = "€"
}
+ // Calculate totals.
p.TotalNet = 0
p.Total = 0
for _, i := range p.Data.Item {
@@ -24,6 +30,21 @@
i.Total = rowTotal
}
+ // Calculate due date.
due := int64(time.Hour*24) * p.Data.DaysDue
p.DueDate = time.Unix(0, p.Date).Add(time.Duration(due)).UnixNano()
+
+ // Denormalize Items' GTUCodes into the Invoice's summary GTU codes.
+ codeSet := make(map[pb.GTUCode]bool)
+ for _, item := range p.Data.Item {
+ for _, code := range item.GtuCode {
+ codeSet[code] = true
+ }
+ }
+ var codes []pb.GTUCode
+ for c, _ := range codeSet {
+ codes = append(codes, c)
+ }
+ sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] })
+ p.GtuCode = codes
}
diff --git a/bgpwtf/invoice/calc_test.go b/bgpwtf/invoice/calc_test.go
new file mode 100644
index 0000000..e8607c9
--- /dev/null
+++ b/bgpwtf/invoice/calc_test.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+ "testing"
+ "time"
+
+ pb "code.hackerspace.pl/hscloud/bgpwtf/invoice/proto"
+)
+
+// Fake test data for test in this file.
+var (
+ itemInternet1 = &pb.Item{
+ Title: "Dostęp do Internetu - Umowa FOOBAR/10 - Opłata Abonentowa 2020/08",
+ Count: 1,
+ UnitPrice: 4200,
+ Vat: 23000,
+ }
+ itemInternet2 = &pb.Item{
+ Title: "Dostęp do Internetu - Umowa FOOBAR/10 - Opłata Abonentowa 2020/09",
+ Count: 1,
+ UnitPrice: 4200,
+ Vat: 23000,
+ }
+ itemHardware = &pb.Item{
+ Title: "Thinkpad x230, i7, 16GB RAM, Refurbished",
+ Count: 1,
+ UnitPrice: 10000,
+ Vat: 23000,
+ GtuCode: []pb.GTUCode{pb.GTUCode_GTU_05},
+ }
+ billing1 = []string{
+ "Wykop Sp. z o. o.",
+ "Zakręt 8",
+ "60-351 Poznań",
+ }
+ billing2 = []string{
+ "TEH Adam Karolczak",
+ "Zgoda 18/2",
+ "95-200 Pabianice",
+ }
+ vatID1 = "PL8086133742"
+ vatID2 = "DE133742429"
+ iban = "PL 59 1090 2402 9746 7956 2256 2375"
+ swift = "WLPPZLPAXXX"
+)
+
+func TestCalculate(t *testing.T) {
+ now := time.Now()
+ for _, te := range []struct {
+ description string
+ data *pb.InvoiceData
+ want *pb.Invoice
+ }{
+ {
+ description: "Invoice without JPK_V7 codes",
+ data: &pb.InvoiceData{
+ Item: []*pb.Item{itemInternet1, itemInternet2},
+ InvoicerBilling: billing1,
+ CustomerBilling: billing2,
+ InvoicerVatId: vatID1,
+ CustomerVatId: vatID2,
+ Date: now.UnixNano(),
+ DaysDue: 21,
+ Iban: iban,
+ Swift: swift,
+ Unit: "PLN",
+ },
+ want: &pb.Invoice{
+ TotalNet: 8400,
+ Total: 10332,
+ Unit: "PLN",
+ },
+ },
+ {
+ description: "Invoice with JPK_V7 codes",
+ data: &pb.InvoiceData{
+ // Repeated item with GTU code GTU_5, to ensure result doesn't
+ // have repeated codes.
+ Item: []*pb.Item{itemInternet1, itemHardware, itemHardware},
+ InvoicerBilling: billing1,
+ CustomerBilling: billing2,
+ InvoicerVatId: vatID1,
+ CustomerVatId: vatID2,
+ Date: now.UnixNano(),
+ DaysDue: 21,
+ Iban: iban,
+ Swift: swift,
+ Unit: "PLN",
+ },
+ want: &pb.Invoice{
+ TotalNet: 24200,
+ Total: 29766,
+ Unit: "PLN",
+ GtuCode: []pb.GTUCode{pb.GTUCode_GTU_05},
+ },
+ },
+ } {
+ t.Run(te.description, func(t *testing.T) {
+ invoice := &pb.Invoice{
+ Data: te.data,
+ Date: te.data.Date,
+ }
+ calculateInvoiceData(invoice)
+ if want, got := te.want.TotalNet, invoice.TotalNet; want != got {
+ t.Errorf("got TotalNet %d, wanted %d", got, want)
+ }
+ if want, got := te.want.Total, invoice.Total; want != got {
+ t.Errorf("got Total %d, wanted %d", got, want)
+ }
+ if want, got := te.want.Unit, invoice.Unit; want != got {
+ t.Errorf("got Unit %q, wanted %q", got, want)
+ }
+ due := time.Duration(int64(time.Hour*24) * te.data.DaysDue)
+ if want, got := now.Add(due).UnixNano(), invoice.DueDate; want != got {
+ t.Errorf("got DueDate %d, wanted %d", got, want)
+ }
+ if want, got := len(te.want.GtuCode), len(invoice.GtuCode); want != got {
+ t.Errorf("got %d GTU codes, wanted %d", got, want)
+ } else {
+ for i, want := range te.want.GtuCode {
+ got := invoice.GtuCode[i]
+ if want != got {
+ t.Errorf("GTU code %d: wanted %s, got %s", i, want.String(), got.String())
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/bgpwtf/invoice/main.go b/bgpwtf/invoice/main.go
index 5133010..ae17dbb 100644
--- a/bgpwtf/invoice/main.go
+++ b/bgpwtf/invoice/main.go
@@ -130,6 +130,10 @@
return &pb.SealInvoiceResponse{}, nil
}
+func (s *service) GetInvoices(req *pb.GetInvoicesRequest, srv pb.Invoicer_GetInvoicesServer) error {
+ return status.Error(codes.Unimplemented, "unimplemented")
+}
+
func init() {
flag.Set("logtostderr", "true")
}
diff --git a/bgpwtf/invoice/model.go b/bgpwtf/invoice/model.go
index fcf4aba..548d8ff 100644
--- a/bgpwtf/invoice/model.go
+++ b/bgpwtf/invoice/model.go
@@ -75,7 +75,7 @@
invoice_id, final_uid, sealed_time
) values (
?,
- ( select printf("%04d", ifnull( (select final_uid as v from invoice_seal order by final_uid desc limit 1), 20000) + 1 )),
+ ( select printf("%04d", ifnull( (select final_uid as v from invoice_seal order by final_uid desc limit 1), 21000) + 1 )),
?
)
`
diff --git a/bgpwtf/invoice/proto/BUILD.bazel b/bgpwtf/invoice/proto/BUILD.bazel
index 51f85fe..2eeae64 100644
--- a/bgpwtf/invoice/proto/BUILD.bazel
+++ b/bgpwtf/invoice/proto/BUILD.bazel
@@ -1,3 +1,4 @@
+load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
diff --git a/bgpwtf/invoice/proto/invoice.proto b/bgpwtf/invoice/proto/invoice.proto
index ee2b9d8..70e6923 100644
--- a/bgpwtf/invoice/proto/invoice.proto
+++ b/bgpwtf/invoice/proto/invoice.proto
@@ -264,12 +264,12 @@
// If sealed, otherwise 'proforma'.
string final_uid = 3;
int64 date = 4;
- int64 due_date = 5;
// Denormalized fields follow.
+ int64 due_date = 5;
uint64 total_net = 6;
uint64 total = 7;
string unit = 8;
- repeated GTUCode gtu_codes = 10;
+ repeated GTUCode gtu_code = 10;
// Next tag: 11;
}
@@ -312,9 +312,48 @@
message SealInvoiceResponse {
}
+message GetInvoicesRequest {
+ // Return all invoices issued in a given year.
+ message ForYear {
+ int32 year = 1;
+ }
+ // Return all invoices issued in a given month of a year.
+ message ForMonth {
+ int32 year = 1;
+ int32 month = 2;
+ }
+
+ oneof range {
+ ForYear for_year = 1;
+ ForMonth for_month = 2;
+ }
+}
+
+message GetInvoicesResponse {
+ // Each chunk may contain an arbitrary amount of invoices, and each
+ // GetInvoices request may return an arbitrary amount of
+ // GetInvoicesResponses in a stream.
+ repeated Invoice invoice = 1;
+}
+
service Invoicer {
+ // Create an invoice with given data, returning UID. The newly created
+ // invoice is created as a proforma invoice and not yet sealed, ie. not
+ // given a unique, sequential ID.
rpc CreateInvoice(CreateInvoiceRequest) returns (CreateInvoiceResponse);
+
+ // Get invoice details for a given UID.
rpc GetInvoice(GetInvoiceRequest) returns (GetInvoiceResponse);
+
+ // Return chunks of a rendered PDF for a given UID. If the invoice is
+ // sealed, the stored PDF will be returned, otherwise a PDF will be
+ // rendered on the fly.
rpc RenderInvoice(RenderInvoiceRequest) returns (stream RenderInvoiceResponse);
+
+ // Seal invoice, ie. assign it a sequential ID and render it to an
+ // immutable PDF for audit purposes.
rpc SealInvoice(SealInvoiceRequest) returns (SealInvoiceResponse);
+
+ // Return a summarized detail of invoice data for a given filter.
+ rpc GetInvoices(GetInvoicesRequest) returns (stream GetInvoicesResponse);
}
diff --git a/bgpwtf/machines/edge01.waw.bgp.wtf-hardware.nix b/bgpwtf/machines/edge01.waw.bgp.wtf-hardware.nix
index d6ed36a..c93048a 100644
--- a/bgpwtf/machines/edge01.waw.bgp.wtf-hardware.nix
+++ b/bgpwtf/machines/edge01.waw.bgp.wtf-hardware.nix
@@ -27,6 +27,7 @@
{ device = "/dev/disk/by-uuid/D8BA-345D";
fsType = "vfat";
};
+ hscloud.anchorvm.blkdev = "/dev/janusz-vg/ripeanchor";
swapDevices =
[ { device = "/dev/disk/by-uuid/5dadcff4-fcd4-4e8d-81f6-be68fb630396"; }
diff --git a/bgpwtf/machines/edge01.waw.bgp.wtf.nix b/bgpwtf/machines/edge01.waw.bgp.wtf.nix
index 1ff21fb..cf61bd9 100644
--- a/bgpwtf/machines/edge01.waw.bgp.wtf.nix
+++ b/bgpwtf/machines/edge01.waw.bgp.wtf.nix
@@ -22,7 +22,7 @@
imports = [
./modules/router.nix
-
+ ./modules/anchorvm.nix
# Private configuration data - notably, customer data.
./secrets/plain/edge01.waw.bgp.wtf-private.nix
];
@@ -129,9 +129,22 @@
ipv4.addresses = [ { address = "185.236.240.14"; prefixLength = 31; } ];
ipv6.addresses = [ { address = "2a0d:eb00:2137:1::a"; prefixLength = 127; } ];
};
+ # VM bridge
+ "br0" = {
+ ipv4.addresses = [ { address = "185.236.240.17"; prefixLength = 29; } ];
+ ipv6.addresses = [ { address = "2a0d:eb00:2137:3::1"; prefixLength = 64; } ];
+ };
# Extra interface configs contained in //bgpwtf/machines/secrets/plain/edge01.waw.bgp.wtf-private.nix
};
+ networking.bridges = {
+ "br0" = {
+ interfaces = [];
+ };
+ };
+ hscloud.anchorvm = {
+ bridge = "br0";
+ };
hscloud.routing.enable = true;
hscloud.routing.routerID = "185.236.240.1";
diff --git a/bgpwtf/machines/modules/anchorvm.nix b/bgpwtf/machines/modules/anchorvm.nix
new file mode 100644
index 0000000..9eddde9
--- /dev/null
+++ b/bgpwtf/machines/modules/anchorvm.nix
@@ -0,0 +1,53 @@
+# This module runs the RIPE anchor VM in a bare qemu.
+# It's expected that a storage LV is created independently and passed as blkdev.
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+ cfg = config.hscloud.anchorvm;
+
+in {
+ options.hscloud.anchorvm = {
+ blkdev = mkOption {
+ type = types.str;
+ description = "Root block device";
+ };
+ bridge = mkOption {
+ type = types.str;
+ description = "bridge interface";
+ };
+ ram = mkOption {
+ type = types.int;
+ description = "memory allocated to the vm";
+ default = 2048;
+ };
+ };
+
+ config.environment = {
+ # qemu-bridge-helper (needed for -nic bridge) requires this file to exist.
+ # We're running as root and don't care about the ACL functionality, so just
+ # make a minimal file that allows the interface.
+ # This snippet stolen from nixpkgs//libvirtd.nix
+ etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n" (e:
+ "allow ${e}") [cfg.bridge];
+ };
+
+ config.systemd.services.anchorvm = {
+ wantedBy = [ "multi-user.target" ];
+ after = [
+ "network.target"
+ ];
+ serviceConfig = {
+ Type = "simple";
+ # spawn=allow needed for bridge helper
+ ExecStart = ''${pkgs.qemu}/bin/qemu-kvm \
+ -nographic -m ${toString cfg.ram} -smp 2 \
+ -drive file=${cfg.blkdev},if=virtio,cache=none,format=raw \
+ -nic bridge,br=${cfg.bridge},model=virtio-net-pci \
+ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=allow,resourcecontrol=deny
+ '';
+ Restart = "always";
+ };
+ };
+}
diff --git a/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix b/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix
index f97f1eb..aea2b80 100644
--- a/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix
+++ b/bgpwtf/machines/secrets/cipher/edge01.waw.bgp.wtf-private.nix
@@ -1,50 +1,51 @@
-----BEGIN PGP MESSAGE-----
-hQEMAzhuiT4RC8VbAQf+La/HpIwKtvmb8ZNhG9lDxmAxibz3G1WgDS5SMMuP3BnO
-IK+sOR5MOfae+Cry82f4Nlg24cJDe4963vNq0g+MiLnGpxmLHb+plQxkf5S2yU55
-+dKwa/usTJZLa97yqsh8+vdfAnA+C3BaLhhDOO5sgZkArpgMGQ6trUsn+6vmhAmZ
-LLw0rlQjz/IvLu8WW8RqZL17ruXOov4qF0NB6yroZAN4j5spGpq65J2puAZb+rI3
-ZGTm8ie6zb7IlW8RWG96TWk48KcYgLnivxVJwll+zn9Mb0g2AzJWyN7bOy1t1wHs
-yYz5xyB8H/Nqhv2dLfHUtXjQF9uh0ivWewWgzzggjIUBDANcG2tp6fXqvgEH/iz7
-OYsZ+Mj42CYjNntwauHCfmGrvSJwVgco+hzfNmAT2wCHwoZKdktLq1oUuyRrPPwK
-nSe/L249Q7Ick47Lk+NrCIjCFsz03ZzTrEeuY3Yav5m9hzifwLJaeTDy+A+jde2D
-zg3gxuAeaaXfrFmKXwfhpf5/t2LCVFtmdmMg+1ButhgWKMDJL6wQVX5EZjHF6PU0
-n2X+XkXFa807FrZ4sI29pxEmakKtFQHsPlP9G2ydBrz2EmcrSgN+uKorAqEtwUBJ
-zuT7Qg4Ewb2hcPTOR4xLV/DCY80IQyWKEcSj1Hjtes3OA5osDlJWeZvoNUFlXSli
-Wc8S087+7fqmRIohU7+FAgwDodoT8VqRl4UBD/9OivAOdH/LxU0WpT7Bofcyw6iA
-QQZ9kN5SLAY6S9pT274QwHkF/sQGt90Gjq0kIMjm4gc5Cx4kAYoT59Ep/R6HmHC7
-u7D+GuByyiSgAa/YEk3tIAI6/gOeVx5BbG6QNL+ysI8XgRrpRWaEfz8OOpBIALgC
-oBSKBaqxApCejkdO4K3P+lT3jJQ2uOT1i72eBhKOpBA1Vz4BWM/w9HFkhw37sWXc
-i34yrlXdldVpJxbKXMyg4f5hz9OrDCTkLce/dwlZpFGFF+JArFLJ2iLytlFOlTDX
-g5yZLSfziw/B5q1mBevJoSFRgZys3z8xmVD/ETS+1DehJjQsLKo/QlpLqx2dj2qd
-9R5OVGMc2s9YmEAee5aoy8z+egZwo0n48Y10LKwro4XCev2ozy73m3nx7uydIwI6
-jl4qYk6qYASJVK9qrBeB//chxrATLS2PgJqO4oKl8EBvmWeUYR4qaOE/p8QlcLLQ
-Vm5Fm+iITPsSNZgK3AGI0S8CoRLPi/o3C5VvAlEt+bfEuTpR8zzeZLfDqOJ94kOp
-4vKaoQTRzHk8itMkdMeBNBAIGh7fEMqiu3kqtlMunp74uKoY/uzh2rhjBzE7quKB
-x3gTgAPnGTiIFvP1ZaNMjsW9OCsOU2Fkv9syLrpkMuGOfCOLSULQG3GYxG4kybzo
-8w6SQ/y/fDermA+6+YUCDAPiA8lOXOuz7wEP/AmhfKpJoizMTULib/Xf7+A9x34K
-eeLsLPnoRZFdO2pTv7ZtNryOmzVPsXjiuwgO9ULrW31WDeb9KNmLk9JyWji6iED1
-wbbSkFT0yzsMD1BjmxNdd9WHK55sxvtX+yH2MHLIepLXAU0emfIBsW9kMuJ01965
-JlCV+TtYJnUGARoLCBnUzvk1VpjnwD3ed0E5atrwk0kRNIPoM1N2DAIxaIua4CX1
-XCylBgMuNW/D86lLpCMHE3LA1FPqNyFe7eyspWGwkDTRK/YZbq6zUGXrNhftK2vV
-8CA0E2dKIbi+TsrvJVb2X5T3Mf4EsTd2oqclVGWttfbXYs5Rv0vf63Z01VsiWL7A
-sSZJlfVnCuKYvZnB3Am1vZtlk6mHqQRJ3DZM0vp/AkjZ5x6rPwy8wRaKpVD06yZO
-N3h4XLsidnteUZBbamQEKjfZoo0zw27TG+gqT+MGIaQWieuCMBm5amiwUJvRsfH0
-S/M50nuikiXl67xEd+vNYGiacH2tfOorUKzoAebEM2EkB3xb97kU3X+OyImre1S6
-Gs5TC1QH6dtd6yKQAPoQZtK0EQI6B+Rw+zH9IAQgPz5C9oUgRd62GJzgonYOJzFH
-jzOARKKSRen+bsrm3jldfJ4sPl6sRWRHWVLsWh4AJYK8iAqhu88dYOc1nksslmGn
-fOV80T8Acty1wshO0ukB34maKPBSF+rs1oJ/IHmqXHfx5mfjsdof56tgAyQ/rIUE
-ulpDR8ytvoviXzH550Ydf3w+bFrEPHkEq0IBXi31BpM7PUiN1xryibdY81ukAjSS
-KRQV5RFS6lwdJOGAFkE0gJ7xES8P7FiC+wKHE78WdvNm0trZiiaJNrMebkitw6K4
-rJMpZj0mhCtKryYmz3L857QFYeUJ9M95Y4/Ib6aM7dn/fJMOeUOVtv0wESQ44dwQ
-CPDjfQdBXrshRWOYMZBsJjI0J0oBc0BRrbHo60LNixqL36CKWyCRNKNVKyZfv23G
-sBeoSpFOHxiiATzlgobufJyhwgU4k8idMt8+lMl783FN/0gRYs14ziYdKNYP2ctG
-Bci38D19tWoLidIuyR9l96IGWmzxey/wQI6UaRFFQR5Y9Po40WCq6XZIbeT3wDRF
-0ynTxlQ0TP6802iglC93S26tLL3QN/nF22gOxKIEC/tGMtv1vJRWfC1svPitDMD+
-Y91xJS+rnzzcEMeDvZmgA5JRGn4YCL0RK1AWgMmQB1zBmPEXpIpFGjQiWF/s2Cg4
-l21wEMmVO8mTVk2Rygaf6wvyf2TmblGPTbb2zfrq7lKtrYFR34oIB9FY9nqJoqDz
-g2OZhq59qIjTgbvPjkPdvqqi0Vg7CQ4zBz+zhOLrb5U0NPHvMtvwZn1bW4dzSFQr
-IBIk63tVMLrfMvNScOAviyK7QoHOhHXElSofTBSUtlMBAE8vusHE/mYic92BVlsu
-TofitNVONyJHKst3jKVU68O3pWHp2+7CyA8gzf6ieH33UQU=
-=1q6K
+hQEMAzhuiT4RC8VbAQgAtWAfrUok8EKsWRY2FEZbNeawMXXpuBrMDARxNY1xhV6b
+3Pxz+148na8+KQR0asleOO+9qPKQP0N+HA6W6SJEgrfQ7q3XKdsaZVMJjNaRhb9j
+eOAe1MLr9Ps0Lx93nknI6bPsX8odpa7oNQYqI7QWBphQLVtdBKaYVkoGN7P+xHlu
+j2HDyD3TOfNH2UxywWOMAJixYkcZ6/v1KNS4JsDUe4b5Tf/IegX2LSoY9qH60Psm
+BFMCmYmGg/MlFyQpyo/CYebJu9BWMHHcj2o29W+OaJqCCYVC+XR+h5EtssnPwedK
+D7A5jLu83pzonZQxiheP0JWSrfMlo8HZNcbhZw0IIoUBDANcG2tp6fXqvgEH/1Gp
+3qsm/MfFtRoHzbRaOEIofaKjv79PhdN8p+9tr4J31oMnuJNIVWozW8R1YBzyL6Pg
+UeKZaAsW9zP9+HhQw8ZahX1A3Paz3LhO9By4wkgOt5up7s5QS2klWWUBaF8AIxKF
+FBoNJcc52VH5yBXyiGd5UAHigKRldwE9yIzWKzt4/60/NtVzkfK6j8KFdRyLJZFE
+0IqRjbFxdvMr4hyc1h5wibBonWKRIDEvXqIeOWdUbDHqTekJcXVUrtxw5u8rry2F
+XaqN9FM+++QPFX4hrbIJe1w7/gINH72PnAPApN/MfUaQsGE/noX3CjaINspB+Nhq
+AwUITmBSjdZ0vuEGoWaFAgwDodoT8VqRl4UBD/9d9rQRlpfKi6K3WlLJra4OEtQm
++RTE8I7OQYCQ/C9QFPw1ux0RtCTQF/uL8nKzoWG+LbUgKVRoZQSV1k4L9QZ/YKhd
+5AC4YT76lPCxCemuSfCQ3sb0I4uEa3JiXeBSVPAfwcS3nXhnyhdFLGhdmAs1hnzo
+E9wD4oxk6yHhozWH1QwJ4Syioywmoy5kKJ4tQREpceCvyoZD2b2h4kLUiZKkBZ+a
+Nzo3AQdQJF/Yr4BL2afMwAwd9x9zpdu/LL7k6INsXuM0S1I8ipdBpiKpZTQcAwK9
+P6RF8p8lKpMX5u1PWq7CMJMJGRQnKAXCJoKYathUygePewq3muOx2Cn4fjNGLT0/
+O795/QtCDI9Q4xsY7/uMLvqpr6skFXecH304Mp23unTBK+knBkWRYpaoA9Gfri8j
+ELMkPvsLCjE9gg3vUdgyPDq3Ov2XDXvgsNW36ghiUYmBRhfoXHG/TuG2iEGP32mM
+coW+4q7DpcyjYUXeh80PLYWe68gCmQ4XN8oh2+xPhFwvjKOidW8o1TbRAdf+A+yu
+d6EvKSzSpG8SABrlCofD7HPyrAbwmTP2SdmoamCT8NSiLoXQZk2ZO3xSYp8gR4aP
+fDENcYtqipDyMTPRfPEPMjYnn+AR4d4UD0jowcaeKThg/fKmzwP0N4pm6uaR9pJ6
++MwKToisQk+tLIv44YUCDAPiA8lOXOuz7wEP/0xTmMro6Jb7LG8lOykEomYBrtT+
+meWftSGjQgSQITkw/cVqIzpYTy7HRtP8luiJyvh1Mt8Fm3MlZLgorp0TtxaUPq0y
+yNUWkDXOIu0pcmX1c38rmEIMvnUcREyJlcFv7Y8XAYf8ZP4TUfA3t+wZaFYoiV0M
+Ai4tHGCIDdCf4Q9fSEFIU+UwJ9/zuBglMPJ74x3IPpEnlKtqkgzpT6kUGypIFKA9
+n83ycOiu78FTHQF3ULY05Of2cBKTtNYc0R71QIyHovh1bT+o3EgMwxcKfzZjBAgI
+lGtJG1+mi44FS90zUPWVtRRf0TwCFXw1HSCS7nbL+5831WdcdhWVgWp2RCczVORv
+G4q5SP1yunBOi7HKVRHGW16bAhM/OOgyJ2lLqRSglvrYC6ympXvw85Y6xXKiG9tC
+wAKSJOvkSZ8xoahoVVQwbbD6uiQqpTzl3befxkLd0uz9U43pfW0z9x+r38ussUbM
+bnLMCwGJ9N7dZevN6GkX886S1Tk0CwM8H39tRyb9xowcc/D9Hoy+/jOq1YOtpHXA
+NXc7gbDppTlRJqbrniH9YxIG00+x1K1RKMBjLgBVOhT64n3U0DIBxvVQLmg9kQUN
+6tiDe0toQUhXVV5eu5PivoPuDcHZ7+4FdoAtIeQ7Y2A34HZ4KXSvF3qkf3RPHWvo
+l77A/4GZzFCQW38E0ukBD1W1hTHZcwNaPwRGtD1rvM1f6o7c60sDAAWkMeKzsFRd
+0ADg08xVj447WnZtgy7j2LPd6JeaodHumw95Cwkdd06lupYwG/CV/ZmvB5Ae+llu
+3mPewaSfpmcWvg0QtfDnv8NWW/BQSnPCjjPlVAdVZxm/uRHNKWSApK1UrfDK463e
+Dg60CXE64j/7bX9TCSf7KVHqan08IMvS8i3gqKYNv/9yGR9EnpNvVWv59zHZao64
+yHO/NHPBXgQk+Vn+P2iER6/bZaMkq7HdjkM9KeTBZUmvzfQ69wYhlrTI5HvkLHNP
+FV+oqwgG9KwPtr0zzBp9fyjfMw6081NedsMH+GwiNvibM5ryBULTnziJ4Hm0MbXH
+Yg/XFi/mMwq3rGq+ZauyjSjIuQXxZMQLpuzSWRQx5thwDePGJ7Dx5nGEJQC5NS/x
+HsPsvKEtl/n/CY5x3qV1NScCTKlXiD7mGE/whuO/Aoun+tHXNah/kwXFWyeQkHkS
+xpjm10vq4s6CvieCDKc+QenxpLt1PGBL7yvGVBXBTQbp1N4laYATXzTFr9b/RG5s
+C0aWWLuraflILCD0wxDuZnFVrPVmsfMp86+donIaNvBFwrYyMw9cnVWGoVAIG4LV
+B9vfZaVzhbNgynnwu1JifZzwIytLBHsemRMq5vRUE8ju0z9FP9hhqHLu1pF1dJH2
+fyqFYL44br5M0c2f2xnzGpsca9C7mDBXN5ktR1ts+fHdLELsqg9SwAtqqWCEB/jQ
+T96vkVEydwQ/mVqCtPLGk3NVJ7NjUVISFvQAj4w9vG9fCgD1NIHJco0VgIkvn9Yu
+2pPCrY+NVeibz9vaaUIuOf6lXZCZFOyVTZIPXQyhumel/f0MDs3Lx5ZA3rUL6jS+
+=nGiW
-----END PGP MESSAGE-----
diff --git a/bgpwtf/machines/tests/edge01-waw.nix b/bgpwtf/machines/tests/edge01-waw.nix
index e0298d2..535418f 100644
--- a/bgpwtf/machines/tests/edge01-waw.nix
+++ b/bgpwtf/machines/tests/edge01-waw.nix
@@ -163,6 +163,17 @@
"e4-oob" = { virtual = true; virtualType = "tap"; };
"e7-dcsw" = { virtual = true; virtualType = "tap"; };
};
+ hscloud.anchorvm = {
+ blkdev = "/anchor.img";
+ ram = 32;
+ };
+ systemd.services.anchorTestImg = {
+ requiredBy = [ "anchorvm.service" ];
+ serviceConfig = {
+ Type = "oneshot";
+ ExecStart = "${pkgs.coreutils}/bin/truncate -s 128m /anchor.img";
+ };
+ };
};
speaker = mkBGPSpeaker;
diff --git a/cluster/kube/k0-cockroach.jsonnet b/cluster/kube/k0-cockroach.jsonnet
new file mode 100644
index 0000000..faf0f14
--- /dev/null
+++ b/cluster/kube/k0-cockroach.jsonnet
@@ -0,0 +1,7 @@
+// Only the CockroachDB cluster - nodes, clients etc.
+
+local k0 = (import "k0.libsonnet").k0;
+
+{
+ cockroach: k0.cockroach,
+}
diff --git a/cluster/kube/k0.libsonnet b/cluster/kube/k0.libsonnet
index ad6b735..44f83d0 100644
--- a/cluster/kube/k0.libsonnet
+++ b/cluster/kube/k0.libsonnet
@@ -65,7 +65,7 @@
topology: [
{ name: "bc01n01", node: "bc01n01.hswaw.net" },
{ name: "bc01n02", node: "bc01n02.hswaw.net" },
- { name: "bc01n03", node: "bc01n03.hswaw.net" },
+ { name: "dcr01s22", node: "dcr01s22.hswaw.net" },
],
// Host path on SSD.
hostPath: "/var/db/crdb-waw1",
@@ -80,112 +80,19 @@
buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
sso: k0.cockroach.waw2.Client("sso"),
herpDev: k0.cockroach.waw2.Client("herp-dev"),
+ gitea: k0.cockroach.waw2.Client("gitea"),
},
},
ceph: {
// waw1 cluster - dead as of 2019/08/06, data corruption
- // waw2 cluster: shitty 7200RPM 2.5" HDDs
- waw2: rook.Cluster(k0.cluster.rook, "ceph-waw2") {
- spec: {
- mon: {
- count: 3,
- allowMultiplePerNode: false,
- },
- storage: {
- useAllNodes: false,
- useAllDevices: false,
- config: {
- databaseSizeMB: "1024",
- journalSizeMB: "1024",
- },
- nodes: [
- {
- name: "bc01n01.hswaw.net",
- location: "rack=dcr01 chassis=bc01 host=bc01n01",
- devices: [ { name: "sda" } ],
- },
- {
- name: "bc01n02.hswaw.net",
- location: "rack=dcr01 chassis=bc01 host=bc01n02",
- devices: [ { name: "sda" } ],
- },
- {
- name: "bc01n03.hswaw.net",
- location: "rack=dcr01 chassis=bc01 host=bc01n03",
- devices: [ { name: "sda" } ],
- },
- ],
- },
- benji:: {
- metadataStorageClass: "waw-hdd-paranoid-2",
- encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
- pools: [
- "waw-hdd-redundant-2",
- "waw-hdd-redundant-2-metadata",
- "waw-hdd-paranoid-2",
- "waw-hdd-yolo-2",
- ],
- s3Configuration: {
- awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
- awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
- bucketName: "benji-k0-backups",
- endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
- },
- }
- },
- },
- waw2Pools: {
- // redundant block storage
- blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
- spec: {
- failureDomain: "host",
- erasureCoded: {
- dataChunks: 2,
- codingChunks: 1,
- },
- },
- },
- // paranoid block storage (3 replicas)
- blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
- spec: {
- failureDomain: "host",
- replicated: {
- size: 3,
- },
- },
- },
- // yolo block storage (no replicas!)
- blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
- spec: {
- failureDomain: "host",
- replicated: {
- size: 1,
- },
- },
- },
- objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
- spec: {
- metadataPool: {
- failureDomain: "host",
- replicated: { size: 3 },
- },
- dataPool: {
- failureDomain: "host",
- erasureCoded: {
- dataChunks: 2,
- codingChunks: 1,
- },
- },
- },
- },
- },
+ // waw2 cluster - dead as of 2021/01/22, torn down (horrible M610 RAID controllers are horrible)
// waw3: 6TB SAS 3.5" HDDs
waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
spec: {
mon: {
- count: 3,
+ count: 1,
allowMultiplePerNode: false,
},
storage: {
@@ -267,8 +174,8 @@
spec: {
failureDomain: "osd",
erasureCoded: {
- dataChunks: 12,
- codingChunks: 4,
+ dataChunks: 2,
+ codingChunks: 1,
},
},
},
@@ -383,7 +290,6 @@
# bits they use, whatever those might be.
# TODO(q3k): fix this?
unnecessarilyInsecureNamespaces: [
- policies.AllowNamespaceInsecure("ceph-waw2"),
policies.AllowNamespaceInsecure("ceph-waw3"),
policies.AllowNamespaceInsecure("matrix"),
policies.AllowNamespaceInsecure("registry"),
diff --git a/cluster/kube/lib/cockroachdb.libsonnet b/cluster/kube/lib/cockroachdb.libsonnet
index 8ebad52..9f206f0 100644
--- a/cluster/kube/lib/cockroachdb.libsonnet
+++ b/cluster/kube/lib/cockroachdb.libsonnet
@@ -42,7 +42,7 @@
local cluster = self,
cfg:: {
- image: "cockroachdb/cockroach:v19.1.0",
+ image: "cockroachdb/cockroach:v20.2.4",
# Must be unique per cluster.
portServe: 26257,
diff --git a/cluster/kube/lib/rook.libsonnet b/cluster/kube/lib/rook.libsonnet
index 8f83d2d..c8e38a8 100644
--- a/cluster/kube/lib/rook.libsonnet
+++ b/cluster/kube/lib/rook.libsonnet
@@ -10,7 +10,7 @@
local env = self,
local cfg = env.cfg,
cfg:: {
- image: "rook/ceph:v1.1.9",
+ image: "rook/ceph:v1.2.7",
namespace: "rook-ceph-system",
},
@@ -54,6 +54,7 @@
},
dataDirHostPath: oa.String { pattern: "^/(\\S+)" },
skipUpgradeChecks: oa.Boolean,
+ continueUpgradeAfterChecksEvenIfNotHealthy: oa.Boolean,
mon: oa.Dict {
allowMultiplePerNode: oa.Boolean,
count: oa.Integer { minimum: 0, maximum: 9 },
@@ -236,6 +237,15 @@
subresources: { status: {} },
},
},
+ cephclients: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephClient") {
+ spec+: {
+ validation: oa.Validation(oa.Dict {
+ spec: oa.Dict {
+ caps: oa.Any,
+ },
+ }),
+ },
+ },
},
sa: {
@@ -307,9 +317,24 @@
},
{
apiGroups: ["policy", "apps"],
- resources: ["poddisruptionbudgets", "deployments"],
+ resources: ["poddisruptionbudgets", "deployments", "replicasets"],
verbs: ["*"],
},
+ {
+ apiGroups: ["healthchecking.openshift.io"],
+ resources: ["machinedisruptionbudgets"],
+ verbs: ["get", "list", "watch", "create", "update", "delete"],
+ },
+ {
+ apiGroups: ["machine.openshift.io"],
+ resources: ["machines"],
+ verbs: ["get", "list", "watch", "create", "update", "delete"],
+ },
+ {
+ apiGroups: ["storage.k8s.io"],
+ resources: ["csidrivers"],
+ verbs: ["create"],
+ },
],
},
@@ -733,7 +758,17 @@
apiGroups: [""],
resources: ["configmaps"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
- }
+ },
+ ],
+ },
+ osdCluster: kube.ClusterRole(cluster.name("osd-cluster")) {
+ metadata+: cluster.metadata { namespace:: null },
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["nodes"],
+ verbs: ["get", "list"],
+ },
],
},
mgr: kube.Role(cluster.name("mgr")) {
@@ -802,13 +837,21 @@
subjects_: [cluster.sa.mgr],
},
+ osdClusterRB: kube.ClusterRoleBinding(cluster.name("osd-cluster")) {
+ metadata+: {
+ namespace:: null,
+ },
+ roleRef_: cluster.roles.osdCluster,
+ subjects_: [cluster.sa.osd],
+ },
+
+
cluster: kube._Object("ceph.rook.io/v1", "CephCluster", name) {
metadata+: cluster.metadata,
spec: {
cephVersion: {
# https://github.com/rook/rook/issues/2945#issuecomment-483964014
- #image: "ceph/ceph:v13.2.5-20190319",
- image: "ceph/ceph:v14.2.2-20190830",
+ image: "ceph/ceph:v14.2.16",
allowUnsupported: true,
},
dataDirHostPath: if name == "ceph-waw2" then "/var/lib/rook" else "/var/lib/rook-%s" % [name],
diff --git a/cluster/nix/defs-machines.nix b/cluster/nix/defs-machines.nix
index 4c50f67..da9150c 100644
--- a/cluster/nix/defs-machines.nix
+++ b/cluster/nix/defs-machines.nix
@@ -25,19 +25,10 @@
mgmtIf = "eno1";
stateVersion = "18.09";
}
- rec {
- name = "bc01n03";
- threads = 16;
- fqdn = "${name}.hswaw.net";
- ipAddr = "185.236.240.37";
- ipAddrBits = 28;
- gw = "185.236.240.33";
- podNet = "10.10.18.0/24";
- diskBoot = "/dev/disk/by-id/scsi-360024e8078a8fa0023b1787e0605a3e0";
- fsRoot = "/dev/disk/by-uuid/afc05836-c9b3-4d7e-b0a2-3ebfe2336d4f";
- mgmtIf = "eno1";
- stateVersion = "18.09";
- }
+ # Tombstone - bc01n03 suffered from hardware failure on 2021/01/10.
+ # rec {
+ # name = "bc01n03";
+ # }
rec {
name = "dcr01s22";
threads = 48;
diff --git a/devtools/gerrit/BUILD b/devtools/gerrit/BUILD
index 406200e..898b13f 100644
--- a/devtools/gerrit/BUILD
+++ b/devtools/gerrit/BUILD
@@ -2,7 +2,7 @@
container_image(
name="with_plugins",
- base="@gerrit-3.0.8//image",
+ base="@gerrit-3.3.0//image",
files = [
"//devtools/gerrit/gerrit-oauth-provider:gerrit-oauth-provider",
"@com_googlesource_gerrit_plugin_owners//owners:owners.jar",
@@ -13,7 +13,7 @@
directory = "/var/gerrit-plugins",
)
container_image(
- name="3.0.8-r1",
+ name="3.3.0-r7",
base=":with_plugins",
files = [":entrypoint.sh"],
directory = "/",
@@ -22,9 +22,9 @@
container_push(
name = "push",
- image = ":3.0.8-r1",
+ image = ":3.3.0-r7",
format = "Docker",
registry = "registry.k0.hswaw.net",
- repository = "devtools/gerrit",
- tag = "3.0.8-r1",
+ repository = "q3k/gerrit",
+ tag = "3.3.0-r7",
)
diff --git a/devtools/gerrit/kube/gerrit.libsonnet b/devtools/gerrit/kube/gerrit.libsonnet
index ce2982d..bebb3cf 100644
--- a/devtools/gerrit/kube/gerrit.libsonnet
+++ b/devtools/gerrit/kube/gerrit.libsonnet
@@ -38,8 +38,8 @@
address: "gerrit@hackerspace.pl",
},
- tag: "3.0.8-r1",
- image: "registry.k0.hswaw.net/devtools/gerrit:" + cfg.tag,
+ tag: "3.3.0-r7",
+ image: "registry.k0.hswaw.net/q3k/gerrit:" + cfg.tag,
resources: {
requests: {
cpu: "100m",
diff --git a/devtools/gerrit/kube/prod.jsonnet b/devtools/gerrit/kube/prod.jsonnet
index 68f68cc..5f47210 100644
--- a/devtools/gerrit/kube/prod.jsonnet
+++ b/devtools/gerrit/kube/prod.jsonnet
@@ -11,7 +11,7 @@
domain: "gerrit.hackerspace.pl",
identity: "7b6244cf-e30b-42c5-ba91-c329ef4e6cf1",
- storageClassName: "waw-hdd-paranoid-2",
+ storageClassName: "waw-hdd-redundant-3",
secureSecret: "gerrit",
},
diff --git a/devtools/kube/depotview.libsonnet b/devtools/kube/depotview.libsonnet
index 019542c..f82ff84 100644
--- a/devtools/kube/depotview.libsonnet
+++ b/devtools/kube/depotview.libsonnet
@@ -10,7 +10,12 @@
local depotview = self,
cfg+: {
image: cfg.image,
- container: depotview.GoContainer("main", "/devtools/depotview") {}
+ container: depotview.GoContainer("main", "/devtools/depotview") {
+ resources: {
+ requests: { cpu: "25m", memory: "256Mi" },
+ limits: { cpu: "500m", memory: "512Mi" },
+ },
+ }
},
}
}
diff --git a/kube/postgres.libsonnet b/kube/postgres.libsonnet
index e89e9db..a80c236 100644
--- a/kube/postgres.libsonnet
+++ b/kube/postgres.libsonnet
@@ -18,6 +18,12 @@
password: error "password must be set",
storageSize: "30Gi",
+
+ # This option can be used to customize initial database creation. For
+ # available options see: https://www.postgresql.org/docs/9.5/app-initdb.html
+      # Changing this option in already-existing deployments will not affect
+      # the existing database.
+ initdbArgs: null,
},
makeName(suffix):: cfg.prefix + suffix,
@@ -63,7 +69,9 @@
POSTGRES_USER: cfg.username,
POSTGRES_PASSWORD: cfg.password,
PGDATA: "/var/lib/postgresql/data/pgdata",
- },
+ } + if cfg.initdbArgs != null then {
+ POSTGRES_INITDB_ARGS: cfg.initdbArgs,
+ } else {},
volumeMounts_: {
data: { mountPath: "/var/lib/postgresql/data" },
},
diff --git a/ops/sso/prod.jsonnet b/ops/sso/prod.jsonnet
new file mode 100644
index 0000000..07f152e
--- /dev/null
+++ b/ops/sso/prod.jsonnet
@@ -0,0 +1,15 @@
+local sso = import "sso.libsonnet";
+
+{
+ sso: sso {
+ cfg+: {
+ domain: "sso.hackerspace.pl",
+ database+: {
+ host: "public.crdb-waw1.svc.cluster.local",
+ name: "sso",
+ username: "sso",
+ tlsSecret: "client-sso-certificate",
+ },
+ },
+ },
+}
diff --git a/ops/sso/sso.libsonnet b/ops/sso/sso.libsonnet
new file mode 100644
index 0000000..3ac3002
--- /dev/null
+++ b/ops/sso/sso.libsonnet
@@ -0,0 +1,126 @@
+# kubectl create secret generic sso --from-literal=secret_key=$(pwgen 24 1) --from-literal=ldap_bind_password=...
+
+local kube = import "../../kube/kube.libsonnet";
+
+{
+ local app = self,
+ local cfg = app.cfg,
+
+ cfg:: {
+ namespace: "sso",
+ image: "registry.k0.hswaw.net/informatic/sso-v2@sha256:a44055a4f1d2a4e0708838b571f3a3c018f3b97adfea71ae0cf1df98246bf6cf",
+ domain: error "domain must be set",
+ database: {
+ host: error "database.host must be set",
+ name: error "database.name must be set",
+ username: error "database.username must be set",
+ port: 26257,
+ tlsSecret: error "database.tlsSecret must be set",
+ },
+ },
+
+ ns: kube.Namespace(app.cfg.namespace),
+
+ deployment: app.ns.Contain(kube.Deployment("sso")) {
+ spec+: {
+ replicas: 1,
+ template+: {
+ spec+: {
+ volumes_: {
+ crdb: {
+ secret: {
+ secretName: cfg.database.tlsSecret,
+ defaultMode: std.parseOctal("0600"),
+ },
+ },
+ tlscopy: kube.EmptyDirVolume(), # see initContainers_.secretCopy
+ },
+ securityContext: {
+ runAsUser: 100,
+ runAsGroup: 101,
+ fsGroup: 101,
+ },
+ initContainers_: {
+ # psycopg2 / libpq wants its TLS secret keys to be only
+                        # readable by the running process. As k8s exposes
+ # secrets/configmaps as symlinks, libpq gets confused
+ # and refuses to start, unless we dereference these into
+ # a local copy with proper permissions.
+ secretCopy: kube.Container("secret-copy") {
+ image: cfg.image,
+ command: ["sh", "-c", "cp -fv /tls-orig/* /tls && chmod 0400 /tls/*"],
+ volumeMounts_: {
+ crdb: { mountPath: "/tls-orig" },
+ tlscopy: { mountPath: "/tls" },
+ },
+ },
+ },
+ containers_: {
+ web: kube.Container("sso") {
+ image: cfg.image,
+ ports_: {
+ http: { containerPort: 5000 },
+ },
+ env_: {
+ DATABASE_URI: "cockroachdb://%s@%s:%d/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s" % [
+ cfg.database.username,
+ cfg.database.host,
+ cfg.database.port,
+ cfg.database.name,
+ "/tls/ca.crt",
+ "/tls/tls.crt",
+ "/tls/tls.key",
+ ],
+
+ LDAP_BIND_PASSWORD: { secretKeyRef: { name: "sso", key: "ldap_bind_password" } },
+ SECRET_KEY: { secretKeyRef: { name: "sso", key: "secret_key" } },
+ LOGGING_LEVEL: "DEBUG",
+ },
+ volumeMounts_: {
+ tlscopy: { mountPath: "/tls" },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ svc: app.ns.Contain(kube.Service("sso")) {
+ target_pod:: app.deployment.spec.template,
+ spec+: {
+ ports: [
+ { name: "http", port: 5000, targetPort: 5000, protocol: "TCP" },
+ ],
+ type: "ClusterIP",
+ },
+ },
+
+ ingress: app.ns.Contain(kube.Ingress("sso")) {
+ metadata+: {
+ annotations+: {
+ "kubernetes.io/tls-acme": "true",
+ "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+ "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+ },
+ },
+ spec+: {
+ tls: [
+ {
+ hosts: [cfg.domain],
+ secretName: "sso-tls",
+ },
+ ],
+ rules: [
+ {
+ host: cfg.domain,
+ http: {
+ paths: [
+ { path: "/", backend: app.svc.name_port },
+ ]
+ },
+ }
+ ],
+ },
+ },
+}