calico 3.14 -> 3.15

Change-Id: I9eceaf26017e483235b97c8d08717d2750fabe25
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/995
Reviewed-by: q3k <q3k@hackerspace.pl>
diff --git a/cluster/kube/lib/calico.libsonnet b/cluster/kube/lib/calico.libsonnet
index 1e2d503..6a9b799 100644
--- a/cluster/kube/lib/calico.libsonnet
+++ b/cluster/kube/lib/calico.libsonnet
@@ -23,11 +23,11 @@
         local cfg = env.cfg,
         cfg:: {
             namespace: "kube-system",
-            version: "v3.14.0",
+            version: "v3.15.5",
             imageController: "calico/kube-controllers:" + cfg.version,
             imageCNI: "calico/cni:" + cfg.version,
             imageNode: "calico/node:" + cfg.version,
-            // TODO(q3k): Separate etcd for calico
+            // TODO(implr): migrate calico from etcd to apiserver
             etcd: {
                 endpoints: ["https://bc01n%02d.hswaw.net:2379" % n for n in std.range(1, 3)],
                 ca: importstr "../../certs/ca-etcd.crt",
@@ -136,16 +136,6 @@
                     verbs: ["patch", "update"],
                 },
                 {
-                    apiGroups: ["networking.k8s.io"],
-                    resources: ["networkpolicies"],
-                    verbs: ["watch", "list"],
-                },
-                {
-                    apiGroups: [""],
-                    resources: ["pods", "namespaces", "serviceaccounts"],
-                    verbs: ["watch", "list"],
-                },
-                {
                     apiGroups: [""],
                     resources: ["pods/status"],
                     verbs: ["patch"],
@@ -186,8 +176,8 @@
         controller: kube.Deployment("calico-kube-controllers") {
             metadata+: {
                 namespace: cfg.namespace,
-                annotations+: {
-                    "scheduler.alpha.kubernetes.io/critical-pod": "",
+                labels+: {
+                    "k8s-app": "calico-kube-controllers",
                 },
             },
             spec+: {
@@ -195,14 +185,16 @@
                 strategy: { type: "Recreate" },
                 template+: {
                     spec+: {
-                        hostNetwork: true,
+                        nodeSelector: {
+                            "kubernetes.io/os": "linux"
+                        },
                         tolerations: [
                             { key: "CriticalAddonsOnly", operator: "Exists" },
+                            { key: "node-role.kubernetes.io/master", effect: "NoSchedule" },
                         ],
                         serviceAccountName: env.saController.metadata.name,
-                        volumes_: {
-                            secrets: kube.SecretVolume(env.secrets),
-                        },
+                        priorityClassName: "system-cluster-critical",
+                        hostNetwork: true,
                         containers_: {
                             "calico-kube-controllers": kube.Container("calico-kube-controllers") {
                                 image: cfg.imageController,
@@ -225,6 +217,9 @@
                                 },
                             },
                         },
+                        volumes_: {
+                            secrets: kube.SecretVolume(env.secrets),
+                        },
                     },
                 },
             },
@@ -244,46 +239,49 @@
         nodeDaemon: kube.DaemonSet("calico-node") {
             metadata+: {
                 namespace: cfg.namespace,
+                labels+: {
+                    "k8s-app": "calico-node",
+                },
             },
             spec+: {
                 template+: {
-                    metadata+: {
-                        annotations+: {
-                            "scheduler.alpha.kubernetes.io/critical-pod": "",
-                        },
-                    },
                     spec+: {
+                        nodeSelector: {
+                            "kubernetes.io/os": "linux"
+                        },
                         hostNetwork: true,
                         tolerations: [
+                            { effect: "NoSchedule", operator: "Exists" },
                             { key: "CriticalAddonsOnly", operator: "Exists" },
                             { effect: "NoExecute", operator: "Exists" },
-                            { effect: "NoSchedule", operator: "Exists" },
                         ],
                         serviceAccountName: env.saNode.metadata.name,
                         terminationGracePeriodSeconds: 0,
+                        priorityClassName: "system-cluster-critical",
                         volumes_: {
+                            lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
+                            var_run_calico: kube.HostPathVolume("/var/run/calico"),
+                            var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
+                            xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
                             cni_bin: kube.HostPathVolume("/opt/cni/bin"),
                             cni_config: kube.HostPathVolume("/opt/cni/conf"),
                             secrets: kube.SecretVolume(env.secrets),
-                            lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
-                            xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
-                            var_run_calico: kube.HostPathVolume("/var/run/calico"),
-                            var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
                             bird_cfg_template: kube.ConfigMapVolume(env.calicoMetallbBird),
+                            // TODO flexvol-driver-host, policysync
                         },
                         initContainers_: {
                             installCNI: kube.Container("install-cni") {
                                 image: cfg.imageCNI,
                                 command: ["/install-cni.sh"],
                                 env_: {
-                                    ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
                                     CNI_CONF_NAME: "10-calico.conflist",
                                     CNI_NETWORK_CONFIG: kube.ConfigMapRef(env.cm, "cni_network_config"),
+                                    ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
+                                    CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
+                                    // TODO(implr) needed?
                                     CNI_CONF_ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
                                     CNI_CONF_ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
                                     CNI_CONF_ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
-                                    CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
-                                    CNI_NET_DIR: "/opt/cni/conf",
                                     SLEEP: "false",
                                     KUBERNETES_NODE_NAME: { fieldRef: { fieldPath: "spec.nodeName" } },
                                 },
@@ -292,6 +290,9 @@
                                     cni_config: { mountPath: "/host/etc/cni/net.d" },
                                     secrets: { mountPath: env.cm.secretPrefix },
                                 },
+                                securityContext: {
+                                    privileged: true,
+                                },
                             },
                         },
                         containers_: {
@@ -310,11 +311,12 @@
                                     IP_AUTODETECTION_METHOD: "can-reach=185.236.240.1",
                                     CALICO_IPV4POOL_IPIP: "Always",
                                     FELIX_IPINIPMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
+                                    FELIX_WIREGUARDMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
                                     CALICO_IPV4POOL_CIDR: "10.10.24.0/21",
                                     CALICO_DISABLE_FILE_LOGGING: "true",
                                     FELIX_DEFAULTENDPOINTTOHOSTACTION: "ACCEPT",
-                                    FELIX_IPV6SUPPORT: "false",
                                     FELIX_LOGSEVERITYSCREEN: "info",
+                                    FELIX_IPV6SUPPORT: "false",
                                     FELIX_HEALTHENABLED: "true",
                                     FELIX_HEALTHHOST: "127.0.0.1",
                                     CALICO_ADVERTISE_CLUSTER_IPS: "10.10.12.0/24",
diff --git a/cluster/tools/BUILD b/cluster/tools/BUILD
index a63245e..e43a809 100644
--- a/cluster/tools/BUILD
+++ b/cluster/tools/BUILD
@@ -13,17 +13,13 @@
     visibility = ["//visibility:public"],
 )
 
-copy_go_binary(
-    name = "calicoctl.bin",
-    src = "@com_github_projectcalico_calicoctl//calicoctl:calicoctl",
-    visibility = ["//visibility:public"],
-)
-
 sh_binary(
     name = "calicoctl",
     srcs = ["calicoctl.sh"],
     data = [
-        ":calicoctl.bin",
+        # It's not worth the effort to bazelify all intermediary versions of calicoctl
+        # just to use them once, so until we finish upgrading, use upstream binaries.
+        "@calicoctl_3_15//file",
         "//tools:secretstore",
         "//tools/hscloud",
     ],
diff --git a/cluster/tools/calicoctl.sh b/cluster/tools/calicoctl.sh
index 86e1097..5f1390e 100755
--- a/cluster/tools/calicoctl.sh
+++ b/cluster/tools/calicoctl.sh
@@ -16,7 +16,7 @@
         $(hscloud::must_rlocation hscloud/tools/secretstore) decrypt "$ws/cluster/secrets/cipher/etcd-calico.key" "$ETCD_KEY_FILE"
     fi
 
-    "$(hscloud::must_rlocation hscloud/cluster/tools/calicoctl.bin)" "$@"
+    "$(hscloud::must_rlocation calicoctl_3_15/file/calicoctl)" "$@"
 }
 
 main "$@"