bazel-cache: deploy, add waw-hdd-yolo-1 ceph pool
diff --git a/app/bazel-cache/prod.jsonnet b/app/bazel-cache/prod.jsonnet
new file mode 100644
index 0000000..9b65e95
--- /dev/null
+++ b/app/bazel-cache/prod.jsonnet
@@ -0,0 +1,108 @@
+# bazel-cache.k0.hswaw.net, a Bazel Cache based on buchgr/bazel-remote.
+# Once https://github.com/bazelbuild/bazel/pull/4889 gets merged, this will
+# likely be replaced with just an Rados GW instance.
+
+local kube = import "../../kube/kube.libsonnet";
+
+{
+ local app = self,
+ local cfg = app.cfg,
+ cfg:: {
+ namespace: "bazel-cache",
+ domain: "k0.hswaw.net",
+ # waw-hdd-yolo-1 is unreplicated - acceptable here, cache contents are expendable.
+ storageClassName: "waw-hdd-yolo-1",
+ },
+
+ metadata(component):: {
+ namespace: cfg.namespace,
+ labels: {
+ "app.kubernetes.io/name": "bazel-cache",
+ "app.kubernetes.io/managed-by": "kubecfg",
+ "app.kubernetes.io/component": component,
+ },
+ },
+
+ namespace: kube.Namespace(cfg.namespace),
+
+ volumeClaim: kube.PersistentVolumeClaim("bazel-cache-storage") {
+ metadata+: app.metadata("bazel-cache-storage"),
+ spec+: {
+ storageClassName: cfg.storageClassName,
+ accessModes: [ "ReadWriteOnce" ],
+ resources: {
+ requests: {
+ storage: "40Gi",
+ },
+ },
+ },
+ },
+
+ deployment: kube.Deployment("bazel-remote") {
+ metadata+: app.metadata("bazel-cache"),
+ spec+: {
+ replicas: 1,
+ template+: {
+ spec+: {
+ volumes_: {
+ data: kube.PersistentVolumeClaimVolume(app.volumeClaim),
+ },
+ containers_: {
+ cache: kube.Container("bazel-remote") {
+ image: "buchgr/bazel-remote-cache",
+ volumeMounts_: {
+ data: { mountPath: "/data" },
+ },
+ ports_: {
+ http: {
+ containerPort: 8080,
+ protocol: "TCP",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ service: kube.Service("bazel-cache") {
+ metadata+: app.metadata("bazel-cache"),
+ target_pod:: app.deployment.spec.template,
+ spec+: {
+ type: "ClusterIP",
+ ports: [
+ { name: "http", port: 8080, targetPort: 8080, protocol: "TCP" },
+ ],
+ }
+ },
+
+ ingress: kube.Ingress("bazel-cache") {
+ metadata+: app.metadata("bazel-cache") {
+ annotations+: {
+ "kubernetes.io/tls-acme": "true",
+ "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+ "nginx.ingress.kubernetes.io/backend-protocol": "HTTP",
+ "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+ },
+ },
+ spec+: {
+ tls: [
+ {
+ hosts: ["bazel-cache.%s" % [cfg.domain]],
+ secretName: "bazel-cache-tls",
+ },
+ ],
+ rules: [
+ {
+ host: "bazel-cache.%s" % [cfg.domain],
+ http: {
+ paths: [
+ { path: "/", backend: app.service.name_port },
+ ]
+ },
+ }
+ ],
+ },
+ },
+}
diff --git a/cluster/README b/cluster/README
index 39f9190..f5b5970 100644
--- a/cluster/README
+++ b/cluster/README
@@ -34,6 +34,7 @@
The following storage classes use this cluster:
- `waw-hdd-redundant-1` - erasure coded 2.1
+ - `waw-hdd-yolo-1` - unreplicated (you _will_ lose your data)
- `waw-hdd-redundant-1-object` - erasure coded 2.1 object store
A dashboard is available at https://ceph-waw1.hswaw.net/, to get the admin password run:
diff --git a/cluster/kube/cluster.jsonnet b/cluster/kube/cluster.jsonnet
index a4d8680..42dd2e3 100644
--- a/cluster/kube/cluster.jsonnet
+++ b/cluster/kube/cluster.jsonnet
@@ -122,6 +122,15 @@
},
},
},
+ // yolo block storage (no replicas!)
+ cephWaw1Yolo: rook.ReplicatedBlockPool(cluster.cephWaw1, "waw-hdd-yolo-1") {
+ spec: {
+ failureDomain: "host",
+ replicated: {
+ size: 1,
+ },
+ },
+ },
cephWaw1Object: rook.S3ObjectStore(cluster.cephWaw1, "waw-hdd-redundant-1-object") {
spec: {
metadataPool: {
diff --git a/cluster/kube/lib/rook.libsonnet b/cluster/kube/lib/rook.libsonnet
index f75e9f0..5223654 100644
--- a/cluster/kube/lib/rook.libsonnet
+++ b/cluster/kube/lib/rook.libsonnet
@@ -469,6 +469,29 @@
 }
 },
+ # A replication-based Ceph block pool plus a matching Kubernetes StorageClass
+ # backed by it. NOTE: callers choosing spec.replicated.size == 1 get a single
+ # copy of the data - losing any backing OSD loses the pool's contents.
+ ReplicatedBlockPool(cluster, name):: {
+ local pool = self,
+ spec:: error "spec must be specified",
+
+ pool: kube._Object("ceph.rook.io/v1", "CephBlockPool", name) {
+ metadata+: cluster.metadata,
+ spec: pool.spec,
+ },
+
+ storageClass: kube.StorageClass(name) {
+ provisioner: "ceph.rook.io/block",
+ parameters: {
+ blockPool: pool.pool.metadata.name,
+ clusterNamespace: pool.pool.metadata.namespace,
+ fstype: "ext4",
+ },
+ reclaimPolicy: "Retain",
+ },
+ },
+
ECBlockPool(cluster, name):: {
local pool = self,
spec:: error "spec must be specified",