{ config, pkgs, lib, ... }:

with (import ./toplevel.nix);
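
# toplevel.nix provides the cluster-wide definitions used throughout this
# module: domain, nodes, pki, ports, k8sapi and acmeEmail.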
let
  # Find this machine's entry in the cluster node list by its FQDN.
  fqdn = config.networking.hostName + domain;
  node = builtins.head (builtins.filter (n: n.fqdn == fqdn) nodes);
  otherNodes = builtins.filter (n: n.fqdn != fqdn) nodes;

in rec {
  imports =
    [ # Include the results of the hardware scan.
      ./hardware-configuration.nix
    ];

  # Use the GRUB 2 boot loader.
  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.device = node.diskBoot;

  time.timeZone = "Europe/Warsaw";

  # List packages installed in system profile. To search, run:
  # $ nix search wget
  environment.systemPackages = with pkgs; [
    wget vim htop tcpdump
    rxvt_unicode.terminfo
  ];

  # Some programs need SUID wrappers, can be configured further or are
  # started in user sessions.
  programs.mtr.enable = true;

  # List services that you want to enable:
  virtualisation.docker.enable = true;
  # Docker's own iptables and masquerade management is disabled here; pod
  # networking is instead handled by the CNI plugins set up in the kubelet
  # configuration below.
  virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --ip-forward=true";

  # Docker 1.13 sets iptables FORWARD to DROP. Unfuck this.
  systemd.services."docker-iptables-unfuck" = {
    enable = true;
    wantedBy = [ "kubernetes.target" ];
    description = "Docker iptables Unfuck";
    after = [ "docker.service" ];
    requires = [ "docker.service" ];
    path = [ pkgs.iptables ];
    script = ''
      iptables -P FORWARD ACCEPT
    '';
    serviceConfig.Type = "oneshot";
  };

  # Enable the OpenSSH daemon.
  services.openssh.enable = true;
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDD4VJXAXEHEXZk2dxNwehneuJcEGkfXG/U7z4fO79vDVIENdedtXQUyLyhZJc5RTEfHhQj66FwIqzl7mzBHd9x9PuDp6QAYXrkVNMj48s6JXqZqBvF6H/weRqFMf4a2TZv+hG8D0kpvmLheCwWAVRls7Jofnp/My+yDd57GMdsbG/yFEf6WPMiOnA7hxdSJSVihCsCSw2p8PD4GhBe8CVt7xIuinhutjm9zYBjV78NT8acjDUfJh0B1ODTjs7nuW1CC4jybSe2j/OU3Yczj4AxRxBNWuFxUq+jBo9BfpbKLh+Tt7re+zBkaicM77KM/oV6943JJxgHNBBOsv9scZE7 q3k@amnesia"
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQb3YQoiYFZLKwvHYKbu1bMqzNeDCAszQhAe1+QI5SLDOotclyY/vFmOReZOsmyMFl71G2d7d+FbYNusUnNNjTxRYQ021tVc+RkMdLJaORRURmQfEFEKbai6QSFTwErXzuoIzyEPK0lbsQuGgqT9WaVnRzHJ2Q/4+qQbxAS34PuR5NqEkmn4G6LMo3OyJ5mwPkCj9lsqz4BcxRaMWFO3mNcwGDfSW+sqgc3E8N6LKrTpZq3ke7xacpQmcG5DU9VO+2QVPdltl9jWbs3gXjmF92YRNOuKPVfAOZBBsp8JOznfx8s9wDgs7RwPmDpjIAJEyoABqW5hlXfqRbTnfnMvuR informatic@InformaticPC"
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDGkMgEVwQM8yeuFUYL2TwlJIq9yUNBmHnwce46zeL2PK2CkMz7sxT/om7sp/K5XDiqeD05Nioe+Dr3drP6B8uI33S5NgxPIfaqQsRS+CBEgk6cqFlcdlKETU/DT+/WsdoO173n7mgGeafPInEuQuGDUID0Fl099kIxtqfAhdeZFMM6/szAZEZsElLJ8K6dp1Ni/jmnXCZhjivZH3AZUlnqrmtDG7FY1bgcOfDXAal45LItughGPtrdiigXe9DK2fW3+9DBZZduh5DMJTNlphAZ+nfSrbyHVKUg6WsgMSprur4KdU47q1QwzqqvEj75JcdP1jOWoZi4F6VJDte9Wb9lhD1jGgjxY9O6Gs4CH35bx15W7CN9hgNa0C8NbPJe/fZYIeMZmJ1m7O2xmnYwP8j+t7RNJWu7Pa3Em4mOEXvhBF07Zfq+Ye/4SluoRgADy5eII2x5fFo5EBhInxK0/X8wF6XZvysalVifoCh7T4Edejoi91oAxFgYAxbboXGlod0eEHIi2hla8SM9+IBHOChmgawKBYp2kzAJyAmHNBF+Pah9G4arVCj/axp/SJZDZbJQoI7UT/fJzEtvlb5RWrHXRq+y6IvjpUq4pzpDWW04+9UMqEEXRmhWOakHfEVM9rN8h3aJBflLUBBnh0Z/hVsKNh8bCRHaKtah8TrD9i+wMw== patryk.jakuszew@gmail.com"
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC33naG1ptCvUcRWX9cj9wXM1nW1lyQC4SvMJzWlr9aMD96O8hQ2JMkuIUgUJvorAY02QRplQ2BuoVoVkdkzwjMyi1bL3OdgcKo7Z1yByClGTTocqNJYY0lcUb6EJH8+6e6F9ydrQlSxNzL1uCaA7phZr+yPcmAmWbSfioXn98yXNkE0emHxzJv/nypJY56sDCMC2IXDRd8L2goDtPwgPEW7bWfAQdIFMJ75xOidZOTxJ8eqyXLw/kxY5UlyX66jdoYz1sE5XUHuoQl1AOG9UdlMo0aMhUvP4pX5l7r7EnA9OttKMFB3oWqkVK/R6ynZ52YNOU5BZ9V+Ppaj34W0xNu+p0mbHcCtXYCTrf/OU0hcZDbDaNTjs6Vtcm2wYw9iAKX7Tex+eOMwUwlrlcyPNRV5BTot7lGNYfauHCSIuWJKN4NhCLR/NtVNh4/94eKkPTwJsY6XqDcS7q49wPAs4DAH7BJgsbHPOqygVHrY0YYEfz3Pj0HTxJHQMCP/hQX4fXEGt0BjgoVJbXPAQtPyeg0JuxiUg+b4CgVVfQ6R060MlM1BZzhmh+FY5MJH6nJppS0aHYCvSg8Z68NUlCPKy0jpcyfuAIWQWwSGG1O010WShQG2ELsvNdg5/4HVdCGNl5mmoom6JOd72FOZyQlHDFfeQUQRn9HOeCq/c51rK99SQ== bartek@IHM"
  ];

  networking.firewall.enable = false;

  # Point k8s apiserver address at ourselves, as every node runs an apiserver
  # with this cert name.
  networking.extraHosts = ''
    127.0.0.1 ${k8sapi}
  '';

  security.acme.certs = {
    host = {
      email = acmeEmail;
      domain = fqdn;
      # Referencing services.nginx here works because the whole configuration
      # is a rec attrset (see `in rec {` above).
      webroot = services.nginx.virtualHosts.host.root;
    };
  };

  services.nginx = {
    enable = true;
    virtualHosts.host = {
      serverName = fqdn;
      root = "/var/www/${fqdn}";
    };
  };

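  # etcd: one cluster member per node, with client and peer traffic both
  # served over TLS, each side using its own PKI.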
  services.etcd = {
    enable = true;
    name = fqdn;
    listenClientUrls = ["https://0.0.0.0:2379"];
    advertiseClientUrls = ["https://${fqdn}:2379"];
    listenPeerUrls = ["https://0.0.0.0:2380"];
    initialAdvertisePeerUrls = ["https://${fqdn}:2380"];
    initialCluster = map (n: "${n.fqdn}=https://${n.fqdn}:2380") nodes;

    clientCertAuth = true;
    trustedCaFile = pki.etcd.server.ca;
    certFile = pki.etcd.server.cert;
    keyFile = pki.etcd.server.key;

    peerClientCertAuth = true;
    peerTrustedCaFile = pki.etcdPeer.ca;
    peerCertFile = pki.etcdPeer.cert;
    peerKeyFile = pki.etcdPeer.key;

    # extraConf entries are passed to etcd as ETCD_* environment variables;
    # this redundantly enforces peer client certificate auth.
    extraConf = {
      PEER_CLIENT_CERT_AUTH = "true";
    };
  };

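  # Every node runs the full Kubernetes control plane (apiserver,
  # controller-manager, scheduler) alongside the kubelet and kube-proxy.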
  services.kubernetes = {
    # We do not use any nixpkgs predefined roles for k8s. Instead, we enable
    # the k8s components manually.
    roles = [];

    caFile = pki.kube.apiserver.ca;
    clusterCidr = "10.10.16.0/20";
    verbose = false;

    path = [ pkgs.e2fsprogs ]; # kubelet wants to mkfs.ext4 when mounting PVCs

    addons.dns.enable = false;

    etcd = {
      servers = map (n: "https://${n.fqdn}:2379") nodes;
      caFile = pki.etcd.kube.ca;
      keyFile = pki.etcd.kube.key;
      certFile = pki.etcd.kube.cert;
    };

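    # Clients reach the apiserver through the shared k8sapi name, which
    # networking.extraHosts above points at the local instance.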
    apiserver = rec {
      enable = true;
      port = ports.k8sAPIServerPlain;
      securePort = ports.k8sAPIServerSecure;
      advertiseAddress = node.ipAddr;

      tlsCertFile = pki.kube.apiserver.cert;
      tlsKeyFile = pki.kube.apiserver.key;

      clientCaFile = pki.kube.apiserver.ca;

      kubeletHttps = true;
      kubeletClientCaFile = pki.kube.apiserver.ca;
      kubeletClientCertFile = pki.kube.apiserver.cert;
      kubeletClientKeyFile = pki.kube.apiserver.key;

      serviceAccountKeyFile = pki.kube.serviceaccounts.key;

      serviceClusterIpRange = "10.10.12.0/24";
      runtimeConfig = "api/all,authentication.k8s.io/v1beta1";
      authorizationMode = ["Node" "RBAC"];
      enableAdmissionPlugins = ["Initializers" "NamespaceLifecycle" "NodeRestriction" "LimitRanger" "ServiceAccount" "DefaultStorageClass" "ResourceQuota"];
      # Run as one of three HA apiserver replicas, and trust the front-proxy
      # (requestheader) CA for aggregated API authentication.
      extraOpts = ''
        --apiserver-count=3 \
        --proxy-client-cert-file=${pki.kubeFront.apiserver.cert} \
        --proxy-client-key-file=${pki.kubeFront.apiserver.key} \
        --requestheader-allowed-names= \
        --requestheader-client-ca-file=${pki.kubeFront.apiserver.ca} \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-username-headers=X-Remote-User \
        -v=5
      '';
    };

    controllerManager = {
      enable = true;
      address = "0.0.0.0";
      port = ports.k8sControllerManagerPlain;
      leaderElect = true;
      serviceAccountKeyFile = pki.kube.serviceaccounts.key;
      rootCaFile = pki.kube.ca;
      extraOpts = ''
        --service-cluster-ip-range=10.10.12.0/24 \
        --use-service-account-credentials=true \
        --secure-port=${toString ports.k8sControllerManagerSecure}
      '';
      kubeconfig = pki.kube.controllermanager.config;
    };

    scheduler = {
      enable = true;
      address = "0.0.0.0";
      port = 0;
      leaderElect = true;
      kubeconfig = pki.kube.scheduler.config;
    };

    proxy = {
      enable = true;
      kubeconfig = pki.kube.proxy.config;
      extraOpts = ''
        --hostname-override=${fqdn} \
        --proxy-mode=iptables
      '';
    };

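    # kubelet: registers this node as schedulable and defers pod networking
    # to the CNI plugins installed under /opt/cni.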
    kubelet = {
      enable = true;
      unschedulable = false;
      hostname = fqdn;
      tlsCertFile = pki.kube.kubelet.cert;
      tlsKeyFile = pki.kube.kubelet.key;
      clientCaFile = pki.kube.kubelet.ca;
      nodeIp = node.ipAddr;
      networkPlugin = "cni";
      clusterDns = "10.10.12.254";
      kubeconfig = pki.kube.kubelet.config;
      extraOpts = ''
        --cni-conf-dir=/opt/cni/conf \
        --cni-bin-dir=/opt/cni/bin
      '';
    };
  };
}