# Common cluster configuration.
# This defines what Kubernetes resources are required to turn a bare k8s
# deployment into a fully working cluster.
# These assume that you're running on bare metal, and using the corresponding
# NixOS deployment that we do.

local kube = import "../../kube/kube.libsonnet";
local policies = import "../../kube/policies.libsonnet";

local calico = import "lib/calico.libsonnet";
local certmanager = import "lib/cert-manager.libsonnet";
local coredns = import "lib/coredns.libsonnet";
local metallb = import "lib/metallb.libsonnet";
local metrics = import "lib/metrics.libsonnet";
local nginx = import "lib/nginx.libsonnet";
local prodvider = import "lib/prodvider.libsonnet";
local rook = import "lib/rook.libsonnet";
local pki = import "lib/pki.libsonnet";

{
    // Cluster builds the common resource set for a single cluster named
    // `short` within DNS realm `realm`. The cluster's FQDN becomes
    // "<short>.<realm>"; that FQDN is also used as the API server's client
    // certificate CN and as an extra in-cluster DNS domain.
    Cluster(short, realm):: {
        local cluster = self,

        short:: short,
        realm:: realm,
        fqdn:: "%s.%s" % [cluster.short, cluster.realm],

        // Per-cluster configuration knobs; consumers must override these
        // via `cfg+: { ... }` when instantiating the cluster.
        cfg:: {
            // Storage class used for internal services (like registry). This must
            // be set to a valid storage class. This can either be a cloud provider class
            // (when running on GKE &co) or a storage class created using rook.
            storageClassNameRedundant: error "storageClassNameRedundant must be set",
        },

        // These are required to let the API Server contact kubelets (for
        // `kubectl logs`, `kubectl exec`, metrics scraping, etc.).
        crAPIServerToKubelet: kube.ClusterRole("system:kube-apiserver-to-kubelet") {
            metadata+: {
                annotations+: {
                    // Mark this role as auto-reconciled by the API server's
                    // RBAC bootstrapping machinery.
                    "rbac.authorization.kubernetes.io/autoupdate": "true",
                },
                labels+: {
                    "kubernetes.io/bootstrapping": "rbac-defaults",
                },
            },
            rules: [
                {
                    apiGroups: [""],
                    // Kubelet subresources the API server needs full access to.
                    resources: ["nodes/%s" % r for r in ["proxy", "stats", "log", "spec", "metrics"]],
                    verbs: ["*"],
                },
            ],
        },

        // Bind the above role to the API server's client identity.
        crbAPIServer: kube.ClusterRoleBinding("system:kube-apiserver") {
            roleRef: {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "ClusterRole",
                name: cluster.crAPIServerToKubelet.metadata.name,
            },
            subjects: [
                {
                    apiGroup: "rbac.authorization.k8s.io",
                    kind: "User",
                    // A cluster API Server authenticates with a certificate whose CN is == to the FQDN of the cluster.
                    name: cluster.fqdn,
                },
            ],
        },

        // This ClusterRole is bound to all humans that log in via prodaccess/prodvider/SSO.
        // It should allow viewing of non-sensitive data for debuggability and openness.
        crViewer: kube.ClusterRole("system:viewer") {
            rules: [
                {
                    apiGroups: [""],
                    resources: [
                        "nodes",
                        "namespaces",
                        "pods",
                        "configmaps",
                        "services",
                    ],
                    verbs: ["list"],
                },
                {
                    apiGroups: ["metrics.k8s.io"],
                    resources: [
                        "nodes",
                        "pods",
                    ],
                    verbs: ["list"],
                },
                {
                    apiGroups: ["apps"],
                    resources: [
                        "statefulsets",
                    ],
                    verbs: ["list"],
                },
                {
                    apiGroups: ["extensions"],
                    resources: [
                        "deployments",
                        "ingresses",
                    ],
                    verbs: ["list"],
                },
            ],
        },

        // This ClusterRole is applied (scoped to personal namespace) to all humans.
        crFullInNamespace: kube.ClusterRole("system:admin-namespace") {
            rules: [
                {
                    apiGroups: ["", "extensions", "apps"],
                    resources: ["*"],
                    verbs: ["*"],
                },
                {
                    apiGroups: ["batch"],
                    resources: ["jobs", "cronjobs"],
                    verbs: ["*"],
                },
            ],
        },

        // This ClusterRoleBinding allows root access to cluster admins.
        crbAdmins: kube.ClusterRoleBinding("system:admins") {
            roleRef: {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "ClusterRole",
                name: "cluster-admin",
            },
            subjects: [
                {
                    apiGroup: "rbac.authorization.k8s.io",
                    kind: "User",
                    name: user + "@hackerspace.pl",
                } for user in [
                    "q3k",
                    "implr",
                    "informatic",
                ]
            ],
        },

        // Baseline PodSecurityPolicies for the whole cluster.
        podSecurityPolicies: policies.Cluster {},

        // Namespaces that are allowed to run insecure (privileged) pods —
        // system components that genuinely need host access.
        allowInsecureNamespaces: [
            policies.AllowNamespaceInsecure("kube-system"),
            policies.AllowNamespaceInsecure("metallb-system"),
        ],

        // Allow all service accounts (thus all controllers) to create secure pods.
        crbAllowServiceAccountsSecure: kube.ClusterRoleBinding("policy:allow-all-secure") {
            roleRef_: cluster.podSecurityPolicies.secureRole,
            subjects: [
                {
                    kind: "Group",
                    apiGroup: "rbac.authorization.k8s.io",
                    name: "system:serviceaccounts",
                },
            ],
        },

        // Calico network fabric
        calico: calico.Environment {},

        // CoreDNS for this cluster. Serves both the standard cluster.local
        // domain and the cluster's own FQDN.
        dns: coredns.Environment {
            cfg+: {
                cluster_domains: [
                    "cluster.local",
                    cluster.fqdn,
                ],
            },
        },

        // Metrics Server
        metrics: metrics.Environment {},

        // Metal Load Balancer
        metallb: metallb.Environment {},

        // Main nginx Ingress Controller
        nginx: nginx.Environment {},

        // Cert-manager (Let's Encrypt, CA, ...)
        certmanager: certmanager.Environment {},

        // Production ACME issuer against Let's Encrypt, using HTTP-01 challenges.
        issuer: kube.ClusterIssuer("letsencrypt-prod") {
            spec: {
                acme: {
                    server: "https://acme-v02.api.letsencrypt.org/directory",
                    email: "bofh@hackerspace.pl",
                    privateKeySecretRef: {
                        name: "letsencrypt-prod",
                    },
                    http01: {},
                },
            },
        },

        // Rook Ceph storage operator.
        rook: rook.Operator {
            operator+: {
                spec+: {
                    // Single operator replica is sufficient; the operator is
                    // a control-plane component, not a data path.
                    replicas: 1,
                },
            },
        },

        // TLS PKI machinery (compatibility with mirko)
        pki: pki.Environment(cluster.short, cluster.realm),

        // Prodvider
        prodvider: prodvider.Environment {
            cfg+: {
                apiEndpoint: "kubernetes.default.svc.%s" % [cluster.fqdn],
            },
        },
    },
}