# NixOS module for a k0 cluster node: Docker, etcd (multi-master), and a full
# Kubernetes control plane (apiserver/controller-manager/scheduler) plus
# kubelet/proxy on every machine. Cluster-specific values (fqdn, machines,
# ports, pki, acmeEmail, k8sapi, machine) come from defs-cluster-k0.nix via
# the `with` below, keyed by this host's hostname.
{ config, pkgs, lib, ... }:

with (( import ./defs-cluster-k0.nix ) config.networking.hostName);
let
  # Pin for k8s packages. This is so that upgrading the system will not upgrade the k8s control or data planes.
  k8spkgs = import (fetchGit {
    # Now at 1.14.3
    name = "nixos-unstable-2019-06-17";
    url = "https://github.com/nixos/nixpkgs-channels/";
    rev = "415e8e5820b7825fb74a6c7986bf6af725227eaa";
  }) {};
  # Pin for kubelet. Kept as a separate pin so the kubelet can be rolled
  # forward/back independently of the control-plane pin above.
  k8spkgsKubelet = import (fetchGit {
    # Now at 1.14.3
    name = "nixos-unstable-2019-06-17";
    url = "https://github.com/nixos/nixpkgs-channels/";
    rev = "415e8e5820b7825fb74a6c7986bf6af725227eaa";
  }) {};

# `rec` matters: security.acme.certs below self-references services.nginx
# defined later in this same attrset.
in rec {
  # Disable kubelet service and bring in our own override.
  # Also nuke flannel from the orbit.
  disabledModules = [
    "services/cluster/kubernetes/kubelet.nix"
    "services/cluster/kubernetes/flannel.nix"
  ];

  imports =
    [
      ./module-kubelet.nix
    ];

  # List services that you want to enable:
  virtualisation.docker.enable = true;
  # Kubernetes (kube-proxy/CNI) owns iptables and IP forwarding, not Docker.
  virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --ip-forward=true";

  # Docker 1.13 sets iptables FORWARD to DROP. Unfuck this.
  systemd.services."docker-iptables-unfuck" = {
    enable = true;
    wantedBy = [ "kubernetes.target" ];
    description = "Docker iptable Unfuck";
    after = [ "docker.service" ];
    requires = [ "docker.service" ];
    path = [ pkgs.iptables ];
    script = ''
      iptables -P FORWARD ACCEPT
    '';
    serviceConfig.Type = "oneshot";
  };

  # Pod/cluster traffic is not compatible with the NixOS firewall; filtering
  # is delegated to k8s NetworkPolicy / upstream instead.
  networking.firewall.enable = false;

  # Point k8s apiserver address at ourselves, as every machine runs an apiserver with this cert name.
  networking.extraHosts = ''
    127.0.0.1 ${k8sapi}
  '';

  security.acme.certs = {
    host = {
      email = acmeEmail;
      domain = fqdn;
      # Self-reference into this rec attrset: resolves to the nginx vhost
      # root ("/var/www/${fqdn}") defined just below.
      webroot = services.nginx.virtualHosts.host.root;
    };
  };

  # Minimal nginx vhost, used as the ACME http-01 challenge webroot.
  services.nginx = {
    enable = true;
    virtualHosts.host = {
      serverName = fqdn;
      root = "/var/www/${fqdn}";
    };
  };

  # One etcd member per machine; peers/clients authenticate with mutual TLS.
  services.etcd = rec {
    enable = true;
    name = fqdn;
    listenClientUrls = ["https://0.0.0.0:2379"];
    advertiseClientUrls = ["https://${fqdn}:2379"];
    listenPeerUrls = ["https://0.0.0.0:2380"];
    initialAdvertisePeerUrls = ["https://${fqdn}:2380"];
    initialCluster = (map (n: "${n.fqdn}=https://${n.fqdn}:2380") machines);
    # "existing": nodes join an already-bootstrapped cluster; initial
    # bootstrap must have happened out-of-band.
    initialClusterState = "existing";

    clientCertAuth = true;
    trustedCaFile = pki.etcd.server.ca;
    certFile = pki.etcd.server.cert;
    keyFile = pki.etcd.server.key;

    peerClientCertAuth = true;
    peerTrustedCaFile = pki.etcdPeer.ca;
    peerCertFile = pki.etcdPeer.cert;
    peerKeyFile = pki.etcdPeer.key;

    extraConf = {
      PEER_CLIENT_CERT_AUTH = "true";
    };
  };

  services.kubernetes = {
    # Pin to specific k8s package.
    package = k8spkgs.kubernetes;
    roles = []; # We do not use any nixpkgs predefined roles for k8s. Instead,
                # we enable k8s components manually.

    caFile = pki.kube.apiserver.ca;
    clusterCidr = "10.10.16.0/20";

    path = [ pkgs.e2fsprogs ]; # kubelet wants to mkfs.ext4 when mounting pvcs

    # DNS (coredns/kube-dns) is deployed as a workload, not via the module.
    addons.dns.enable = false;

    apiserver = rec {
      enable = true;
      insecurePort = ports.k8sAPIServerPlain;
      securePort = ports.k8sAPIServerSecure;
      advertiseAddress = "${machine.ipAddr}";

      etcd = {
        # https://github.com/kubernetes/kubernetes/issues/72102
        # Each apiserver talks only to its local etcd member (see issue above).
        servers = (map (n: "https://${n.fqdn}:2379") ( [ machine ] ));
        caFile = pki.etcd.kube.ca;
        keyFile = pki.etcd.kube.key;
        certFile = pki.etcd.kube.cert;
      };

      tlsCertFile = pki.kube.apiserver.cert;
      tlsKeyFile = pki.kube.apiserver.key;

      clientCaFile = pki.kube.apiserver.ca;

      kubeletHttps = true;
      kubeletClientCaFile = pki.kube.apiserver.ca;
      kubeletClientCertFile = pki.kube.apiserver.cert;
      kubeletClientKeyFile = pki.kube.apiserver.key;

      serviceAccountKeyFile = pki.kube.serviceaccounts.key;

      allowPrivileged = true;
      serviceClusterIpRange = "10.10.12.0/24";
      runtimeConfig = "api/all,authentication.k8s.io/v1beta1";
      authorizationMode = ["Node" "RBAC"];
      enableAdmissionPlugins = ["NamespaceLifecycle" "NodeRestriction" "LimitRanger" "ServiceAccount" "DefaultStorageClass" "ResourceQuota" "PodSecurityPolicy"];
      # Aggregation-layer (front-proxy) client certs plus HA apiserver count.
      extraOpts = ''
        --apiserver-count=5 \
        --proxy-client-cert-file=${pki.kubeFront.apiserver.cert} \
        --proxy-client-key-file=${pki.kubeFront.apiserver.key} \
        --requestheader-allowed-names= \
        --requestheader-client-ca-file=${pki.kubeFront.apiserver.ca} \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-username-headers=X-Remote-User \
        -v=5
      '';
    };

    controllerManager = {
      enable = true;
      bindAddress = "0.0.0.0";
      insecurePort = ports.k8sControllerManagerPlain;
      leaderElect = true;
      serviceAccountKeyFile = pki.kube.serviceaccounts.key;
      rootCaFile = pki.kube.ca;
      # NOTE(review): the final line ends in a bare `\` with no argument after
      # it — likely a leftover continuation; harmless today but fragile if an
      # option is appended. Left byte-identical to avoid changing runtime args.
      extraOpts = ''
        --service-cluster-ip-range=10.10.12.0/24 \
        --use-service-account-credentials=true \
        --secure-port=${toString ports.k8sControllerManagerSecure}\
      '';
      kubeconfig = pki.kube.controllermanager.config;
    };

    scheduler = {
      enable = true;
      address = "0.0.0.0";
      # port = 0 disables the scheduler's insecure HTTP endpoint.
      port = 0;
      leaderElect = true;
      kubeconfig = pki.kube.scheduler.config;
    };

    proxy = {
      enable = true;
      kubeconfig = pki.kube.proxy.config;
      # NOTE(review): no space before the `\` continuation after ${fqdn} —
      # works as a shell line-continuation but inconsistent with the apiserver
      # extraOpts style above. Left byte-identical.
      extraOpts = ''
        --hostname-override=${fqdn}\
        --proxy-mode=iptables
      '';
    };

    kubelet = {
      enable = true;
      unschedulable = false;
      hostname = fqdn;
      tlsCertFile = pki.kube.kubelet.cert;
      tlsKeyFile = pki.kube.kubelet.key;
      clientCaFile = pki.kube.kubelet.ca;
      nodeIp = machine.ipAddr;
      networkPlugin = "cni";
      clusterDns = "10.10.12.254";
      kubeconfig = pki.kube.kubelet.config;
      # Disable the unauthenticated read-only kubelet port.
      extraOpts = ''
        --read-only-port=0
      '';
      # Kubelet is pinned independently of the control plane.
      package = k8spkgsKubelet.kubernetes;
    };

  };

  # https://github.com/NixOS/nixpkgs/issues/60687
  systemd.services.kube-control-plane-online = {
    preStart = pkgs.lib.mkForce "";
  };
  # this seems to depend on flannel
  # TODO(q3k): file issue
  systemd.services.kubelet-online = {
    script = pkgs.lib.mkForce "sleep 1";
  };
}