cluster: deploy calico and metrics service
diff --git a/cluster/certs/ca.srl b/cluster/certs/ca.srl
index c15d2a2..ad8865b 100644
--- a/cluster/certs/ca.srl
+++ b/cluster/certs/ca.srl
@@ -1 +1 @@
-80F13FCE5DBBF736
+80F13FCE5DBBF737
diff --git a/cluster/certs/kube-calico.crt b/cluster/certs/kube-calico.crt
new file mode 100644
index 0000000..bf82329
--- /dev/null
+++ b/cluster/certs/kube-calico.crt
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIF0DCCA7gCCQCA8T/OXbv3NzANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMC
+UEwxFDASBgNVBAgMC01hem93aWVja2llMREwDwYDVQQHDAhXYXJzemF3YTEuMCwG
+A1UECgwlU3Rvd2FyenlzemVuaWUgV2Fyc3phd3NraSBIYWNrZXJzcGFjZTEQMA4G
+A1UECwwHaHNjbG91ZDEaMBgGA1UEAwwRQm9vdHN0cmFwIE5vZGUgQ0ExITAfBgkq
+hkiG9w0BCQEWEnEza0BoYWNrZXJzcGFjZS5wbDAeFw0xOTAxMTcxNTAwNTBaFw0y
+MDAxMTcxNTAwNTBaMIGbMQswCQYDVQQGEwJQTDEUMBIGA1UECAwLTWF6b3dpZWNr
+aWUxETAPBgNVBAcMCFdhcnN6YXdhMS4wLAYDVQQKDCVTdG93YXJ6eXN6ZW5pZSBX
+YXJzemF3c2tpIEhhY2tlcnNwYWNlMSIwIAYDVQQLDBlLdWJlcm5ldGVzIENhbGlj
+byBBY2NvdW50MQ8wDQYDVQQDDAZjYWxpY28wggIiMA0GCSqGSIb3DQEBAQUAA4IC
+DwAwggIKAoICAQD0/1yVcocxB6JZr3OKrO9ReAYF/q1gq6NT7JkswVL8OP8NYSU/
+uwumAiewgN86rm3KSny5szI3g7mL/QDTFA3rC1yNnwFv19ke9q+j7bU7PQrOgx3S
+g8qSLB14MbhokR43GBDRkfAEzbXzEIfKyukMuT/o8izHXZ9JBoxBFdNUzFXMP1fR
+l7ReWOVEqLS0r1DUbvLbhkwwoW278R+jEUjUjZ7m+m+FEWVyeouYi8FoOeQwIKTx
+V4cVaLvKj3hGhevuxyXnD8xX9L0wnzHQHEDSTLSFKtQ5rqPG/9d2YoTfhTcFLcuG
+3Fv6VfRCcDiZuKQiDxwr9GQhQxgN7X54mKLLZHqoIOyiNhZzSZLRN0R2WbRxb3B3
+01BhF1Cl8NPjqNAwfXQshRPbGHcMv0qK5jheEng3Nz8FRKYNNR6OXR9az9Aq2AsE
+4Bs1BjTLNmBRUiTfc07V2v8PR+GGauMQAV+krJFMsodgaQo87l3loX40nBsQjgL8
+M9NAHJgDE3A/cCSkJ838a9AK7QS8To5ro0k0tVeFHbtkY41u1JN4Mqhoo5kR1fr1
+OrmQ2hJ7/dS1k7Q/hDGMfW/a6wdrtsYJSe3TATIBqh7BxTnu2E3H5fCEri4yQCrO
+qViVoR/TdkjtIEZvOSvTbkLlVa1XD9+VKXUuZ26KQyG5msPUeVliyZd0OQIDAQAB
+MA0GCSqGSIb3DQEBCwUAA4ICAQAR8ljvIlI0/lWXAn++jaNK+o6gvLzQqtb9HY3l
+273ePbKhhx4+U3fH3XaD/Invg8WeiKyRsk/Enlk8opHO4Kc2nvwE1HKdHIHKQI6G
+tVnZbJPK4F5EQXL38G8MBhKRYEiBpH5zpXTeFTbsYHFwZKGwEwGICfrx2s68yMYm
+GOW3Tx61kvBiqSKt6QmEYsTZRd9eTxmnzr4qoSMU87IEz+HQHwcqJl66HUf6DxB/
+J4MlZSn9J6pTEcKN2eAjnOqM95JyRExOrCBxvpgQRYAc9615krpKKWtctbNMjxlN
+QL3i2cEwkkP2fMJAhLrA90sP9XnmkLkBYITzA+gPtPyGRRqlenRkZKAI3rCMOxuX
+CAduwz+qCMM0RGj+NlKWzi2COhwbT7j+GOFuaX9jQ9bq0L8prMA41i+K4CbFH1jE
+TMXC3HVLaXQLyVkGIwJvdYtlsw+4ZoLGdV/rROcWjq/N1a4206SYMzwEWHIaFLPf
+9d+qzfXgWKnygLV6hfRD3vqUBTu+slrClAoYgHGeqmNGd9n/GVxDBouCheXV+GdF
+xtRlIwztMNrYX0WxDn8AtkL0eC4a8lodJjLaI5A+RFRNBjt+p7/EyTI8EE6CnTmK
+3sZO+Bxi4RybgAoC7uyKDNmMa5iVQRbr0Nzdq2QsMfR+hwGwO+bjIUZyMnnu5b8b
+BNQFNg==
+-----END CERTIFICATE-----
diff --git a/cluster/kube/cluster.jsonnet b/cluster/kube/cluster.jsonnet
index 9209cf1..4eb9eee 100644
--- a/cluster/kube/cluster.jsonnet
+++ b/cluster/kube/cluster.jsonnet
@@ -2,6 +2,8 @@
local kube = import "../../kube/kube.libsonnet";
local coredns = import "lib/coredns.libsonnet";
+local metrics = import "lib/metrics.libsonnet";
+local calico = import "lib/calico.libsonnet";
local Cluster(fqdn) = {
local cluster = self,
@@ -40,9 +42,12 @@
],
},
+ // Calico network fabric
+ calico: calico.Environment {},
// CoreDNS for this cluster.
- dns: coredns.Environment {
- },
+ dns: coredns.Environment {},
+ // Metrics Server
+ metrics: metrics.Environment {},
};
diff --git a/cluster/kube/lib/calico.libsonnet b/cluster/kube/lib/calico.libsonnet
new file mode 100644
index 0000000..c66878e
--- /dev/null
+++ b/cluster/kube/lib/calico.libsonnet
@@ -0,0 +1,310 @@
+# Deploy hosted calico with its own etcd.
+
+local kube = import "../../../kube/kube.libsonnet";
+
+local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadata.name) {
+ roleRef: {
+ apiGroup: "rbac.authorization.k8s.io",
+ kind: "ClusterRole",
+ name: cr.metadata.name,
+ },
+ subjects: [
+ {
+ kind: "ServiceAccount",
+ name: sa.metadata.name,
+ namespace: sa.metadata.namespace,
+ },
+ ],
+};
+
+{
+ Environment: {
+ local env = self,
+ local cfg = env.cfg,
+ cfg:: {
+ namespace: "kube-system",
+ version: "v3.4.0",
+ imageController: "quay.io/calico/kube-controllers:" + cfg.version,
+ imageCNI: "quay.io/calico/cni:" + cfg.version,
+ imageNode: "quay.io/calico/node:" + cfg.version,
+ // TODO(q3k): Separate etcd for calico
+ etcd: {
+ endpoints: ["https://bc01n%02d.hswaw.net:2379" % n for n in std.range(1, 3)],
+ ca: importstr "../../certs/ca.crt",
+ cert: importstr "../../certs/kube-calico.crt",
+ key: importstr "../../secrets/plain/kube-calico.key",
+ },
+ },
+
+ cm: kube.ConfigMap("calico-config") {
+ local cm = self,
+ secretPrefix:: "/calico-secrets/",
+
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+
+ data: {
+ etcd_endpoints: std.join(",", cfg.etcd.endpoints),
+
+ etcd_ca: cm.secretPrefix + "etcd-ca",
+ etcd_cert: cm.secretPrefix + "etcd-cert",
+ etcd_key: cm.secretPrefix + "etcd-key",
+
+ calico_backend: "bird",
+ veth_mtu: "1440",
+
+ cni_network_config: |||
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.0",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "etcd_endpoints": "__ETCD_ENDPOINTS__",
+ "etcd_key_file": "__ETCD_KEY_FILE__",
+ "etcd_cert_file": "__ETCD_CERT_FILE__",
+ "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+ |||
+ },
+ },
+
+ secrets: kube.Secret("calico-secrets") {
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+
+ data_: {
+ "etcd-ca": cfg.etcd.ca,
+ "etcd-cert": cfg.etcd.cert,
+ "etcd-key": cfg.etcd.key,
+ },
+ },
+
+ saNode: kube.ServiceAccount("calico-node") {
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+ },
+
+ crNode: kube.ClusterRole("calico-node") {
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["pods", "nodes", "namespaces"],
+ verbs: ["get"],
+ },
+ {
+ apiGroups: [""],
+ resources: ["endpoints", "services"],
+ verbs: ["watch", "list"],
+ },
+ {
+ apiGroups: [""],
+ resources: ["nodes/status"],
+ verbs: ["patch"],
+ },
+ ],
+ },
+
+ crbNode: bindServiceAccountClusterRole(env.saNode, env.crNode),
+
+ saController: kube.ServiceAccount("calico-kube-controllers") {
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+ },
+
+ crController: kube.ClusterRole("calico-kube-controllers") {
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["pods", "nodes", "namespaces", "serviceaccounts"],
+ verbs: ["watch", "list"],
+ },
+ {
+ apiGroups: ["networking.k8s.io"],
+ resources: ["networkpolicies"],
+ verbs: ["watch", "list"],
+ },
+ ],
+ },
+
+ crbController: bindServiceAccountClusterRole(env.saController, env.crController),
+
+ controller: kube.Deployment("calico-kube-controllers") {
+ metadata+: {
+ namespace: cfg.namespace,
+ annotations+: {
+ "scheduler.alpha.kubernetes.io/critical-pod": "",
+ },
+ },
+ spec+: {
+ replicas: 1,
+ strategy: { type: "Recreate" },
+ template+: {
+ spec+: {
+ hostNetwork: true,
+ tolerations: [
+ { key: "CriticalAddonsOnly", operator: "Exists" },
+ ],
+ serviceAccountName: env.saController.metadata.name,
+ volumes_: {
+ secrets: kube.SecretVolume(env.secrets),
+ },
+ containers_: {
+ "calico-kube-controllers": kube.Container("calico-kube-controllers") {
+ image: cfg.imageController,
+ env_: {
+ ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
+ ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
+ ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
+ ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
+ ENABLED_CONTROLLERS: "policy,namespace,serviceaccount,workloadendpoint,node",
+ },
+ volumeMounts_: {
+ secrets: {
+ mountPath: env.cm.secretPrefix,
+ },
+ },
+ readinessProbe: {
+ exec: {
+ command: [ "/usr/bin/check-status", "-r" ],
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ nodeDaemon: kube.DaemonSet("calico-node") {
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+ spec+: {
+ template+: {
+ metadata+: {
+ annotations+: {
+ "scheduler.alpha.kubernetes.io/critical-pod": "",
+ },
+ },
+ spec+: {
+ hostNetwork: true,
+ tolerations: [
+ { key: "CriticalAddonsOnly", operator: "Exists" },
+ { key: "NoExecute", operator: "Exists" },
+ { key: "NoSchedule", operator: "Exists" },
+ ],
+ serviceAccountName: env.saNode.metadata.name,
+ terminationGracePeriodSeconds: 0,
+ volumes_: {
+ cni_bin: kube.HostPathVolume("/opt/cni/bin"),
+ cni_config: kube.HostPathVolume("/opt/cni/conf"),
+ secrets: kube.SecretVolume(env.secrets),
+ lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
+ xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
+ var_run_calico: kube.HostPathVolume("/var/run/calico"),
+ var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
+ },
+ initContainers_: {
+ installCNI: kube.Container("install-cni") {
+ image: cfg.imageCNI,
+ command: ["/install-cni.sh"],
+ env_: {
+ ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
+ CNI_CONF_NAME: "10-calico.conflist",
+ CNI_NETWORK_CONFIG: kube.ConfigMapRef(env.cm, "cni_network_config"),
+ CNI_CONF_ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
+ CNI_CONF_ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
+ CNI_CONF_ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
+ CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
+ CNI_NET_DIR: "/opt/cni/conf",
+ SLEEP: "false",
+ },
+ volumeMounts_: {
+ cni_bin: { mountPath: "/host/opt/cni/bin" },
+ cni_config: { mountPath: "/host/etc/cni/net.d" },
+ secrets: { mountPath: env.cm.secretPrefix },
+ },
+ },
+ },
+ containers_: {
+ calicoNode: kube.Container("calico-node") {
+ image: cfg.imageNode,
+ env_: {
+ ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
+ ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
+ ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
+ ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
+ CALICO_K8S_NODE_REF: kube.FieldRef("spec.nodeName"),
+ CALICO_NETWORK_BACKEND: kube.ConfigMapRef(env.cm, "calico_backend"),
+ CLUSTER_TYPE: "k8s,bgp",
+ IP: kube.FieldRef("status.hostIP"),
+ CALICO_IPV4POOL_IPIP: "Always",
+ FELIX_IPINIPMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
+ CALICO_IPV4POOL_CIDR: "10.10.24.0/21",
+ CALICO_DISABLE_FILE_LOGGING: "true",
+ FELIX_DEFAULTENDPOINTTOHOSTACTION: "ACCEPT",
+ FELIX_IPV6SUPPORT: "false",
+ FELIX_LOGSEVERITYSCREEN: "info",
+ FELIX_HEALTHENABLED: "true",
+ CALICO_ADVERTISE_CLUSTER_IPS: "10.10.12.0/24",
+ },
+ securityContext: {
+ privileged: true,
+ },
+ resources: {
+ requests: { cpu: "250m" },
+ },
+ livenessProbe: {
+ httpGet: {
+ path: "/liveness",
+ port: 9099,
+ host: "localhost",
+ },
+ periodSeconds: 10,
+ initialDelaySeconds: 10,
+ failureThreshold: 6,
+ },
+ readinessProbe: {
+ exec: {
+ command: ["/bin/calico-node", "-bird-ready", "-felix-ready"],
+ },
+ periodSeconds: 10,
+ },
+ volumeMounts_: {
+ lib_modules: { mountPath: "/lib/modules" },
+ xtables_lock: { mountPath: "/run/xtables.lock" },
+ var_run_calico: { mountPath: "/var/run/calico" },
+ var_lib_calico: { mountPath: "/var/lib/calico" },
+ secrets: { mountPath: env.cm.secretPrefix },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+}
diff --git a/cluster/kube/lib/metrics.libsonnet b/cluster/kube/lib/metrics.libsonnet
new file mode 100644
index 0000000..e11f5ef
--- /dev/null
+++ b/cluster/kube/lib/metrics.libsonnet
@@ -0,0 +1,142 @@
+# Deploy a per-cluster Metrics Server setup.
+
+local kube = import "../../../kube/kube.libsonnet";
+
+{
+ Environment: {
+ local env = self,
+ local cfg = env.cfg,
+ cfg:: {
+ image: "k8s.gcr.io/metrics-server-amd64:v0.3.1",
+ namespace: "kube-system",
+ },
+
+ sa: kube.ServiceAccount("metrics-server") {
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+ },
+
+ # Cluster Role and Binding for the metrics server to allow reading node state.
+ crServer: kube.ClusterRole("system:metrics-server") {
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["pods", "nodes", "nodes/stats"],
+ verbs: ["get", "list", "watch"]
+ },
+ ],
+ },
+ crbServer: kube.ClusterRoleBinding("system:metrics-server") {
+ roleRef: {
+ apiGroup: "rbac.authorization.k8s.io",
+ kind: "ClusterRole",
+ name: env.crServer.metadata.name,
+ },
+ subjects: [
+ {
+ kind: "ServiceAccount",
+ name: env.sa.metadata.name,
+ namespace: env.sa.metadata.namespace,
+ },
+ ],
+ },
+
+ # Let the metrics server act as an auth delegator.
+ crbAuthDelegator: kube.ClusterRoleBinding("metrics-server:system:auth-delegator") {
+ roleRef: {
+ apiGroup: "rbac.authorization.k8s.io",
+ kind: "ClusterRole",
+ name: "system:auth-delegator",
+ },
+ subjects: [
+ {
+ kind: "ServiceAccount",
+ name: env.sa.metadata.name,
+ namespace: env.sa.metadata.namespace,
+ },
+ ],
+ },
+
+ # Let the metrics server access the apiserver extensions configmap.
+ rbAPIExtensionsMap: kube.RoleBinding("metrics-server-auth-reader") {
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+ roleRef: {
+ apiGroup: "rbac.authorization.k8s.io",
+ kind: "Role",
+ name: "extension-apiserver-authentication-reader",
+ },
+ subjects: [
+ {
+ kind: "ServiceAccount",
+ name: env.sa.metadata.name,
+ namespace: env.sa.metadata.namespace,
+ },
+ ],
+ },
+
+
+ deployment: kube.Deployment("metrics-server") {
+ metadata+: {
+ namespace: cfg.namespace,
+ labels+: {
+ "k8s-app": "metrics-server",
+ },
+ },
+ spec+: {
+ template+: {
+ spec+: {
+ serviceAccountName: env.sa.metadata.name,
+ volumes_: {
+ tmp: {
+ emptyDir: {},
+ },
+ },
+ containers_: {
+                        metricsServer: kube.Container("metrics-server") {
+ local container = self,
+
+ image: cfg.image,
+ imagePullPolicy: "IfNotPresent",
+ # TODO(q3k): define resource limits
+ ports_: {
+ https: {
+ containerPort: 443,
+ protocol: "TCP",
+ },
+ },
+ volumeMounts_: {
+ tmp: {
+ mountPath: "/tmp",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ svc: kube.Service("metrics-server") {
+ local svc = self,
+ metadata+: {
+ namespace: cfg.namespace,
+ },
+ target_pod: env.deployment.spec.template,
+ },
+ api: kube._Object("apiregistration.k8s.io/v1beta1", "APIService", "v1beta1.metrics.k8s.io") {
+ spec+: {
+ service: {
+ name: env.svc.metadata.name,
+ namespace: env.svc.metadata.namespace,
+ },
+ group: "metrics.k8s.io",
+ version: "v1beta1",
+ insecureSkipTLSVerify: true,
+ groupPriorityMinimum: 100,
+ versionPriority: 100,
+ },
+ },
+ },
+}
diff --git a/tools/clustercfg.py b/tools/clustercfg.py
index a36664a..0426cee 100644
--- a/tools/clustercfg.py
+++ b/tools/clustercfg.py
@@ -221,7 +221,7 @@
'-reqexts', 'SAN',
] if san else []))
- pki.sign(local_csr, local_cert, local_config, days)
+ pki.sign(local_csr, local_cert, local_config if san else None, days)
os.remove(local_csr)
os.remove(local_config)
@@ -329,6 +329,7 @@
modified |= shared_cert(p, c, fqdn, 'kube-{}'.format(component), Subject(o, ou, o))
modified |= shared_cert(p, c, fqdn, 'kube-apiserver', Subject(Subject.hswaw, 'Kubernetes API', cluster), san=['IP:10.10.12.1', 'DNS:' + cluster])
modified |= shared_cert(p, c, fqdn, 'kube-serviceaccounts', Subject(Subject.hswaw, 'Kubernetes Service Account Signer', 'service-accounts'))
+ modified |= shared_cert(p, c, fqdn, 'kube-calico', Subject(Subject.hswaw, 'Kubernetes Calico Account', 'calico'))
c.run('nixos-rebuild switch')