| { config, pkgs, lib, machines, ... }: |
| |
| with lib; |
| |
let
  cfg = config.hscloud.kube.control;

  # Every machine that opts into the kubernetes control plane, sorted by
  # FQDN so that all nodes evaluate the same, stable list.
  allNodes =
    let
      isControl = m: (m.config ? hscloud.kube.control) && m.config.hscloud.kube.control.enable;
      byFqdn = a: b: a.config.hscloud.base.fqdn < b.config.hscloud.base.fqdn;
    in sort byFqdn (filter isControl (attrValues machines));

  # Control plane nodes other than the machine currently being evaluated.
  otherNodes = filter (m: m.config.networking.hostName != config.networking.hostName) allNodes;

  # Shorthands for this machine's FQDN and its PKI material.
  fqdn = config.hscloud.base.fqdn;
  pki = config.hscloud.kube.pki;
| |
| in { |
| imports = [ |
| ./kube-common.nix |
| ]; |
| |
| options.hscloud.kube.control = { |
| enable = mkEnableOption "kubernetes control plane"; |
| portControllerManagerSecure = mkOption { |
| type = types.int; |
| description = "Port at which k8s controller-manager will listen."; |
| default = 4003; |
| }; |
| portSchedulerSecure = mkOption { |
| type = types.int; |
| description = "Port at which k8s scheduler will listen."; |
| default = 4005; |
| }; |
| }; |
| |
  config = mkIf cfg.enable {
    # NOTE(review): the host firewall is disabled entirely on control plane
    # nodes — presumably traffic is filtered upstream; confirm before
    # relying on host-level filtering here.
    networking.firewall.enable = false;

    # Point k8s apiserver address at ourselves, as we _are_ the apiserver.
    networking.extraHosts = ''
      127.0.0.1 k0.hswaw.net
    '';
| |
| services.etcd = rec { |
| enable = true; |
| name = fqdn; |
| listenClientUrls = ["https://0.0.0.0:2379"]; |
| advertiseClientUrls = ["https://${fqdn}:2379"]; |
| listenPeerUrls = ["https://0.0.0.0:2380"]; |
| initialAdvertisePeerUrls = ["https://${fqdn}:2380"]; |
| initialCluster = (map (n: "${n.config.hscloud.base.fqdn}=https://${n.config.hscloud.base.fqdn}:2380") allNodes); |
| initialClusterState = "existing"; |
| |
| clientCertAuth = true; |
| trustedCaFile = pki.etcd.server.ca; |
| certFile = pki.etcd.server.cert; |
| keyFile = pki.etcd.server.key; |
| |
| peerClientCertAuth = true; |
| peerTrustedCaFile = pki.etcd.peer.ca; |
| peerCertFile = pki.etcd.peer.cert; |
| peerKeyFile = pki.etcd.peer.key; |
| |
| extraConf = { |
| PEER_CLIENT_CERT_AUTH = "true"; |
| }; |
| }; |
| |
| # https://github.com/NixOS/nixpkgs/issues/60687 |
| systemd.services.kube-control-plane-online = { |
| preStart = pkgs.lib.mkForce ""; |
| }; |
| |
    services.kubernetes = {
      package = config.hscloud.kube.package;
      # We do not use any nixpkgs predefined roles for k8s. Instead, we enable
      # k8s components manually.
      roles = [];
      # The nixpkgs-managed DNS addon is not used.
      addons.dns.enable = false;
      caFile = pki.kube.apiserver.ca;
      # Pod network CIDR. NOTE(review): must agree with the cluster's CNI
      # configuration — confirm against the rest of the deployment.
      clusterCidr = "10.10.16.0/20";
| |
| apiserver = rec { |
| enable = true; |
| # BUG: should be 0. |
| insecurePort = 4000; |
| securePort = config.hscloud.kube.portAPIServerSecure; |
| advertiseAddress = config.hscloud.base.ipAddr; |
| |
| etcd = { |
| # Only point at our own etcd. |
| servers = [ "https://${fqdn}:2379" ]; |
| caFile = pki.etcd.kube.ca; |
| keyFile = pki.etcd.kube.key; |
| certFile = pki.etcd.kube.cert; |
| }; |
| |
| tlsCertFile = pki.kube.apiserver.cert; |
| tlsKeyFile = pki.kube.apiserver.key; |
| clientCaFile = pki.kube.apiserver.ca; |
| |
| kubeletHttps = true; |
| # Same CA as main APIServer CA. |
| kubeletClientCaFile = pki.kube.apiserver.ca; |
| kubeletClientCertFile = pki.kube.apiserver.cert; |
| kubeletClientKeyFile = pki.kube.apiserver.key; |
| |
| serviceAccountKeyFile = pki.kube.serviceaccounts.key; |
| |
| allowPrivileged = true; |
| serviceClusterIpRange = "10.10.12.0/24"; |
| runtimeConfig = "api/all,authentication.k8s.io/v1beta1"; |
| authorizationMode = [ |
| "Node" "RBAC" |
| ]; |
| enableAdmissionPlugins = [ |
| "NamespaceLifecycle" "NodeRestriction" "LimitRanger" "ServiceAccount" |
| "DefaultStorageClass" "ResourceQuota" "PodSecurityPolicy" |
| ]; |
| extraOpts = '' |
| --apiserver-count=5 \ |
| --proxy-client-cert-file=${pki.kubeFront.apiserver.cert} \ |
| --proxy-client-key-file=${pki.kubeFront.apiserver.key} \ |
| --requestheader-allowed-names= \ |
| --requestheader-client-ca-file=${pki.kubeFront.apiserver.ca} \ |
| --requestheader-extra-headers-prefix=X-Remote-Extra- \ |
| --requestheader-group-headers=X-Remote-Group \ |
| --requestheader-username-headers=X-Remote-User \ |
| -v=5 |
| ''; |
| }; |
| |
| controllerManager = let |
| top = config.services.kubernetes; |
| kubeconfig = top.lib.mkKubeConfig "controller-manager" pki.kube.controllermanager.config; |
| in { |
| enable = true; |
| bindAddress = "0.0.0.0"; |
| insecurePort = 0; |
| leaderElect = true; |
| serviceAccountKeyFile = pki.kube.serviceaccounts.key; |
| rootCaFile = pki.kube.ca; |
| extraOpts = '' |
| --service-cluster-ip-range=10.10.12.0/24 \ |
| --use-service-account-credentials=true \ |
| --secure-port=${toString cfg.portControllerManagerSecure}\ |
| --authentication-kubeconfig=${kubeconfig}\ |
| --authorization-kubeconfig=${kubeconfig}\ |
| ''; |
| kubeconfig = pki.kube.controllermanager.config; |
| }; |
| |
| scheduler = let |
| top = config.services.kubernetes; |
| kubeconfig = top.lib.mkKubeConfig "scheduler" pki.kube.scheduler.config; |
| in { |
| enable = true; |
| address = "0.0.0.0"; |
| port = 0; |
| leaderElect = true; |
| kubeconfig = pki.kube.scheduler.config; |
| extraOpts = '' |
| --secure-port=${toString cfg.portSchedulerSecure}\ |
| --authentication-kubeconfig=${kubeconfig}\ |
| --authorization-kubeconfig=${kubeconfig}\ |
| ''; |
| }; |
| }; |
| }; |
| } |
| |