# Top level cluster configuration.
local kube = import "../../kube/kube.libsonnet";
local calico = import "lib/calico.libsonnet";
local certmanager = import "lib/cert-manager.libsonnet";
local cockroachdb = import "lib/cockroachdb.libsonnet";
local coredns = import "lib/coredns.libsonnet";
local metallb = import "lib/metallb.libsonnet";
local metrics = import "lib/metrics.libsonnet";
local nginx = import "lib/nginx.libsonnet";
local rook = import "lib/rook.libsonnet";
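// Cluster sets up the in-cluster infrastructure common to a single cluster (network fabric, DNS,
// metrics, load balancing, ingress, certificates and storage), parametrized by the cluster's FQDN.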
local Cluster(fqdn) = {
    local cluster = self,
    // These are required to let the API Server contact kubelets.
    crAPIServerToKubelet: kube.ClusterRole("system:kube-apiserver-to-kubelet") {
        metadata+: {
            annotations+: {
                "rbac.authorization.kubernetes.io/autoupdate": "true",
            },
            labels+: {
"kubernets.io/bootstrapping": "rbac-defaults",
            },
        },
        rules: [
            {
                apiGroups: [""],
                resources: ["nodes/%s" % r for r in [ "proxy", "stats", "log", "spec", "metrics" ]],
                verbs: ["*"],
            },
        ],
    },
    crbAPIServer: kube.ClusterRoleBinding("system:kube-apiserver") {
        roleRef: {
            apiGroup: "rbac.authorization.k8s.io",
            kind: "ClusterRole",
            name: cluster.crAPIServerToKubelet.metadata.name,
        },
        subjects: [
            {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "User",
                // The cluster's API Server authenticates with a certificate whose CN is equal to the cluster's FQDN.
                name: fqdn,
            },
        ],
    },
    // Calico network fabric
    calico: calico.Environment {},
    // CoreDNS for this cluster.
    dns: coredns.Environment {},
    // Metrics Server
    metrics: metrics.Environment {},
    // Metal Load Balancer
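    // The single address pool hands out public IPv4 addresses (185.236.240.50-63), announced in layer2 (ARP) mode.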
    metallb: metallb.Environment {
        cfg+: {
            addressPools: [
                { name: "public-v4-1", protocol: "layer2", addresses: ["185.236.240.50-185.236.240.63"] },
            ],
        },
    },
    // Main nginx Ingress Controller
    nginx: nginx.Environment {},
    certmanager: certmanager.Environment {},
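    // A cluster-wide ACME issuer against the Let's Encrypt production endpoint, using HTTP-01 challenges.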
    issuer: certmanager.ClusterIssuer("letsencrypt-prod") {
        spec: {
            acme: {
                server: "https://acme-v02.api.letsencrypt.org/directory",
                email: "bofh@hackerspace.pl",
                privateKeySecretRef: {
                    name: "letsencrypt-prod",
                },
                http01: {},
            },
        },
    },
    // Rook Ceph storage
    rook: rook.Operator {
        operator+: {
            spec+: {
                // TODO(q3k): Bring up the operator again when stability gets fixed
                // See: https://github.com/rook/rook/issues/3059#issuecomment-492378873
                replicas: 0,
            },
        },
    },
};
{
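    // The k0 cluster, running at k0.hswaw.net.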
    k0: {
        local k0 = self,
        cluster: Cluster("k0.hswaw.net"),
        cockroach: {
            waw1: cockroachdb.Cluster("crdb-waw1") {
                cfg+: {
                    topology: [
                        { name: "bc01n01", node: "bc01n01.hswaw.net", ip: "185.236.240.35" },
                        { name: "bc01n02", node: "bc01n02.hswaw.net", ip: "185.236.240.36" },
                        { name: "bc01n03", node: "bc01n03.hswaw.net", ip: "185.236.240.37" },
                    ],
                    hostPath: "/var/db/crdb-waw1",
                },
            },
        },
        ceph: {
            // waw1 cluster
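            // Three monitors (at most one per node) keep quorum through the loss of a single node;
            // each node contributes its local "sda" device as an OSD.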
            waw1: rook.Cluster(k0.cluster.rook, "ceph-waw1") {
                spec: {
                    mon: {
                        count: 3,
                        allowMultiplePerNode: false,
                    },
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "bc01n01.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n01",
                                devices: [ { name: "sda" } ],
                            },
                            {
                                name: "bc01n02.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n02",
                                devices: [ { name: "sda" } ],
                            },
                            {
                                name: "bc01n03.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n03",
                                devices: [ { name: "sda" } ],
                            },
                        ],
                    },
                },
            },
            // redundant block storage
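            // (2 data + 1 coding chunks, host failure domain: survives the loss of any single host at 1.5x raw overhead)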
            blockRedundant: rook.ECBlockPool(k0.ceph.waw1, "waw-hdd-redundant-1") {
                spec: {
                    failureDomain: "host",
                    erasureCoded: {
                        dataChunks: 2,
                        codingChunks: 1,
                    },
                },
            },
            // yolo block storage (no replicas!)
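            // (replicated size 1 keeps a single copy of every object; losing any OSD loses data)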
            blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw1, "waw-hdd-yolo-1") {
                spec: {
                    failureDomain: "host",
                    replicated: {
                        size: 1,
                    },
                },
            },
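            // S3-compatible object store: 3x replicated metadata pool, 2+1 erasure-coded data pool.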
            objectRedundant: rook.S3ObjectStore(k0.ceph.waw1, "waw-hdd-redundant-1-object") {
                spec: {
                    metadataPool: {
                        failureDomain: "host",
                        replicated: { size: 3 },
                    },
                    dataPool: {
                        failureDomain: "host",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
            },
        },
    },
}