{ config, pkgs, lib, ... }:

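# defs-cluster-k0.nix presumably returns a per-machine attribute set (selected
# by hostname); the names used throughout this file (fqdn, machine, machines,
# pki, ports, k8sapi, acmeEmail) are expected to come from that set.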
with (( import ./defs-cluster-k0.nix ) config.networking.hostName);
let
  # Pin for k8s packages. This is so that upgrading the system will not upgrade the k8s control or data planes.
  k8spkgs = import (fetchGit {
    # Now at 1.14.3
    name = "nixos-unstable-2019-06-17";
    url = "https://github.com/nixos/nixpkgs-channels/";
    rev = "415e8e5820b7825fb74a6c7986bf6af725227eaa";
  }) {};
  # Pin for kubelet
  k8spkgsKubelet = import (fetchGit {
    # Now at 1.13.5
    name = "nixos-unstable-2019-04-12";
    url = "https://github.com/nixos/nixpkgs-channels/";
    rev = "1fc591f9a5bd1b016b5d66dfab29560073955a14";
  }) {};
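  # Upgrading the control plane or the kubelet thus means bumping the
  # respective pin above, independently of the host's nixpkgs channel.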

in rec {
  # Disable kubelet service and bring in our own override.
  # Also nuke flannel from orbit.
  disabledModules = [
    "services/cluster/kubernetes/kubelet.nix"
    "services/cluster/kubernetes/flannel.nix"
  ];

  imports = [
    ./module-kubelet.nix
  ];

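  # Docker must not manage iptables or masquerading itself; pod networking
  # (and NAT) is presumably handled by the CNI plugin configured for the
  # kubelet below, so Docker is reduced to plain IP forwarding.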
  virtualisation.docker.enable = true;
  virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --ip-forward=true";

  # Docker 1.13 sets iptables FORWARD to DROP. Unfuck this.
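  # A FORWARD policy of DROP would break pod-to-pod traffic routed through the
  # host, so flip it back to ACCEPT once Docker is up. Note that this is a
  # oneshot: if Docker later restarts and resets the policy, this unit would
  # presumably need to be restarted as well.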
  systemd.services."docker-iptables-unfuck" = {
    enable = true;
    wantedBy = [ "kubernetes.target" ];
    description = "Docker iptables Unfuck";
    after = [ "docker.service" ];
    requires = [ "docker.service" ];
    path = [ pkgs.iptables ];
    script = ''
      iptables -P FORWARD ACCEPT
    '';
    serviceConfig.Type = "oneshot";
  };

  networking.firewall.enable = false;

  # Point k8s apiserver address at ourselves, as every machine runs an apiserver with this cert name.
  networking.extraHosts = ''
    127.0.0.1 ${k8sapi}
  '';

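  # ACME host certificate (Let's Encrypt by default); the HTTP-01 challenge is
  # answered from the webroot of the nginx virtual host defined just below.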
  security.acme.certs = {
    host = {
      email = acmeEmail;
      domain = fqdn;
      webroot = services.nginx.virtualHosts.host.root;
    };
  };

  services.nginx = {
    enable = true;
    virtualHosts.host = {
      serverName = fqdn;
      root = "/var/www/${fqdn}";
    };
  };

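  # Every machine in the cluster runs an etcd member, peering over the fqdn
  # URLs below. initialClusterState = "existing" means a node (re)joins an
  # already-bootstrapped cluster; a brand new member would presumably first
  # have to be registered with `etcdctl member add`.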
  services.etcd = rec {
    enable = true;
    name = fqdn;
    listenClientUrls = ["https://0.0.0.0:2379"];
    advertiseClientUrls = ["https://${fqdn}:2379"];
    listenPeerUrls = ["https://0.0.0.0:2380"];
    initialAdvertisePeerUrls = ["https://${fqdn}:2380"];
    initialCluster = (map (n: "${n.fqdn}=https://${n.fqdn}:2380") machines);
    initialClusterState = "existing";

    clientCertAuth = true;
    trustedCaFile = pki.etcd.server.ca;
    certFile = pki.etcd.server.cert;
    keyFile = pki.etcd.server.key;

    peerClientCertAuth = true;
    peerTrustedCaFile = pki.etcdPeer.ca;
    peerCertFile = pki.etcdPeer.cert;
    peerKeyFile = pki.etcdPeer.key;

    extraConf = {
      PEER_CLIENT_CERT_AUTH = "true";
    };
  };

  services.kubernetes = {
    # Pin to a specific k8s package.
    package = k8spkgs.kubernetes;
    # We do not use any nixpkgs predefined roles for k8s. Instead,
    # we enable k8s components manually.
    roles = [];

    caFile = pki.kube.apiserver.ca;
    clusterCidr = "10.10.16.0/20";

    path = [ pkgs.e2fsprogs ]; # kubelet wants to mkfs.ext4 when mounting PVCs

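    # The nixpkgs DNS addon is disabled; cluster DNS is presumably deployed
    # separately, at the clusterDns address (10.10.12.254) handed to kubelets
    # below, inside the 10.10.12.0/24 service CIDR.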
    addons.dns.enable = false;

    apiserver = rec {
      enable = true;
      insecurePort = ports.k8sAPIServerPlain;
      securePort = ports.k8sAPIServerSecure;
      advertiseAddress = "${machine.ipAddr}";

      etcd = {
        # https://github.com/kubernetes/kubernetes/issues/72102
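        # Each apiserver only talks to its local etcd member (rather than all
        # of `machines`), presumably to work around the client balancer issue
        # linked above.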
        servers = (map (n: "https://${n.fqdn}:2379") [ machine ]);
        caFile = pki.etcd.kube.ca;
        keyFile = pki.etcd.kube.key;
        certFile = pki.etcd.kube.cert;
      };

      tlsCertFile = pki.kube.apiserver.cert;
      tlsKeyFile = pki.kube.apiserver.key;

      clientCaFile = pki.kube.apiserver.ca;

      kubeletHttps = true;
      kubeletClientCaFile = pki.kube.apiserver.ca;
      kubeletClientCertFile = pki.kube.apiserver.cert;
      kubeletClientKeyFile = pki.kube.apiserver.key;

      serviceAccountKeyFile = pki.kube.serviceaccounts.key;

      allowPrivileged = true;
      serviceClusterIpRange = "10.10.12.0/24";
      runtimeConfig = "api/all,authentication.k8s.io/v1beta1";
      authorizationMode = ["Node" "RBAC"];
      enableAdmissionPlugins = ["NamespaceLifecycle" "NodeRestriction" "LimitRanger" "ServiceAccount" "DefaultStorageClass" "ResourceQuota" "PodSecurityPolicy"];
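      # The flags below configure the API aggregation layer (front proxy): the
      # proxy-client-* certs are what the apiserver presents to extension API
      # servers, and the requestheader-* flags describe how it authenticates
      # requests forwarded through the front proxy. --apiserver-count=5
      # presumably matches the number of control plane nodes.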
      extraOpts = ''
        --apiserver-count=5 \
        --proxy-client-cert-file=${pki.kubeFront.apiserver.cert} \
        --proxy-client-key-file=${pki.kubeFront.apiserver.key} \
        --requestheader-allowed-names= \
        --requestheader-client-ca-file=${pki.kubeFront.apiserver.ca} \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-username-headers=X-Remote-User \
        -v=5
      '';
    };

    controllerManager = {
      enable = true;
      bindAddress = "0.0.0.0";
      insecurePort = ports.k8sControllerManagerPlain;
      leaderElect = true;
      serviceAccountKeyFile = pki.kube.serviceaccounts.key;
      rootCaFile = pki.kube.ca;
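      # --use-service-account-credentials makes each controller use its own
      # ServiceAccount token, so RBAC can be scoped per controller instead of
      # granting the controller-manager's shared identity everything.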
      extraOpts = ''
        --service-cluster-ip-range=10.10.12.0/24 \
        --use-service-account-credentials=true \
        --secure-port=${toString ports.k8sControllerManagerSecure}
      '';
      kubeconfig = pki.kube.controllermanager.config;
    };

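    # Scheduler: port = 0 disables its insecure HTTP endpoint; it still takes
    # part in leader election via the apiserver.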
    scheduler = {
      enable = true;
      address = "0.0.0.0";
      port = 0;
      leaderElect = true;
      kubeconfig = pki.kube.scheduler.config;
    };

    proxy = {
      enable = true;
      kubeconfig = pki.kube.proxy.config;
      extraOpts = ''
        --hostname-override=${fqdn} \
        --proxy-mode=iptables
      '';
    };

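    # The kubelet runs from its own, older pin (k8spkgsKubelet, 1.13.5 per the
    # comment above) so it can be upgraded independently of the control plane.
    # --read-only-port=0 disables the unauthenticated read-only port (10255).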
    kubelet = {
      enable = true;
      unschedulable = false;
      hostname = fqdn;
      tlsCertFile = pki.kube.kubelet.cert;
      tlsKeyFile = pki.kube.kubelet.key;
      clientCaFile = pki.kube.kubelet.ca;
      nodeIp = machine.ipAddr;
      networkPlugin = "cni";
      clusterDns = "10.10.12.254";
      kubeconfig = pki.kube.kubelet.config;
      extraOpts = ''
        --read-only-port=0
      '';
      package = k8spkgsKubelet.kubernetes;
    };

  };

  # https://github.com/NixOS/nixpkgs/issues/60687
  systemd.services.kube-control-plane-online = {
    preStart = pkgs.lib.mkForce "";
  };
  # this seems to depend on flannel
  # TODO(q3k): file issue
  systemd.services.kubelet-online = {
    script = pkgs.lib.mkForce "sleep 1";
  };
}