k0: host os bump wip
This bumps it on bc01n01, but nowhere else yet.
We have to vendor some more kubelet bits unfortunately.
Change-Id: Ifb169dd9c2c19d60f88d946d065d4446141601b1
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/1465
Reviewed-by: implr <implr@hackerspace.pl>
diff --git a/cluster/machines/modules/base.nix b/cluster/machines/modules/base.nix
index 66335ef..054ffb8 100644
--- a/cluster/machines/modules/base.nix
+++ b/cluster/machines/modules/base.nix
@@ -29,6 +29,30 @@
description = "IPv4 address of gateway.";
};
};
+
+ # Override current nixos kubernetes with our vendored fork.
+ # Also nuke flannel from orbit.
+ disabledModules = [
+ "services/cluster/kubernetes/apiserver.nix"
+ "services/cluster/kubernetes/controller-manager.nix"
+ "services/cluster/kubernetes/default.nix"
+ "services/cluster/kubernetes/kubelet.nix"
+ "services/cluster/kubernetes/pki.nix"
+ "services/cluster/kubernetes/proxy.nix"
+ "services/cluster/kubernetes/scheduler.nix"
+ "services/cluster/kubernetes/flannel.nix"
+ ];
+
+ imports = [
+ ./vendor/apiserver.nix
+ ./vendor/controller-manager.nix
+ ./vendor/default.nix
+ ./vendor/kubelet.nix
+ ./vendor/pki.nix
+ ./vendor/proxy.nix
+ ./vendor/scheduler.nix
+ ];
+
config = rec {
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
diff --git a/cluster/machines/modules/kube-dataplane.nix b/cluster/machines/modules/kube-dataplane.nix
index f38ad84..45efcd2 100644
--- a/cluster/machines/modules/kube-dataplane.nix
+++ b/cluster/machines/modules/kube-dataplane.nix
@@ -33,19 +33,10 @@
};
};
- # Disable kubelet service and bring in our own override.
- # Also nuke flannel from the orbit.
- disabledModules = [
- "services/cluster/kubernetes/kubelet.nix"
- "services/cluster/kubernetes/flannel.nix"
- ];
-
imports = [
- ./kubelet.nix
./kube-common.nix
];
-
config = mkIf cfg.enable {
# If we're not running the control plane, render a hostsfile that points at
# all other control plane nodes. Otherwise, the control plane module will
diff --git a/cluster/machines/modules/vendor/apiserver.nix b/cluster/machines/modules/vendor/apiserver.nix
new file mode 100644
index 0000000..1f498f9
--- /dev/null
+++ b/cluster/machines/modules/vendor/apiserver.nix
@@ -0,0 +1,473 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ top = config.services.kubernetes;
+ cfg = top.apiserver;
+
+ isRBACEnabled = elem "RBAC" cfg.authorizationMode;
+
+ apiserverServiceIP = (concatStringsSep "." (
+ take 3 (splitString "." cfg.serviceClusterIpRange
+ )) + ".1");
+in
+{
+
+ imports = [
+ (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
+ (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
+ (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
+ (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
+ (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
+ (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
+ (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
+ (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
+ ];
+
+ ###### interface
+ options.services.kubernetes.apiserver = with lib.types; {
+
+ advertiseAddress = mkOption {
+ description = ''
+ Kubernetes apiserver IP address on which to advertise the apiserver
+ to members of the cluster. This address must be reachable by the rest
+ of the cluster.
+ '';
+ default = null;
+ type = nullOr str;
+ };
+
+ allowPrivileged = mkOption {
+ description = "Whether to allow privileged containers on Kubernetes.";
+ default = false;
+ type = bool;
+ };
+
+ authorizationMode = mkOption {
+ description = ''
+ Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+ <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+ '';
+ default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
+ type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
+ };
+
+ authorizationPolicy = mkOption {
+ description = ''
+ Kubernetes apiserver authorization policy file. See
+ <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+ '';
+ default = [];
+ type = listOf attrs;
+ };
+
+ basicAuthFile = mkOption {
+ description = ''
+ Kubernetes apiserver basic authentication file. See
+ <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+ '';
+ default = null;
+ type = nullOr path;
+ };
+
+ bindAddress = mkOption {
+ description = ''
+ The IP address on which to listen for the --secure-port port.
+ The associated interface(s) must be reachable by the rest
+ of the cluster, and by CLI/web clients.
+ '';
+ default = "0.0.0.0";
+ type = str;
+ };
+
+ clientCaFile = mkOption {
+ description = "Kubernetes apiserver CA file for client auth.";
+ default = top.caFile;
+ type = nullOr path;
+ };
+
+ disableAdmissionPlugins = mkOption {
+ description = ''
+ Kubernetes admission control plugins to disable. See
+ <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+ '';
+ default = [];
+ type = listOf str;
+ };
+
+ enable = mkEnableOption "Kubernetes apiserver";
+
+ enableAdmissionPlugins = mkOption {
+ description = ''
+ Kubernetes admission control plugins to enable. See
+ <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+ '';
+ default = [
+ "NamespaceLifecycle" "LimitRanger" "ServiceAccount"
+ "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
+ "NodeRestriction"
+ ];
+ example = [
+ "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
+ "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
+ "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
+ ];
+ type = listOf str;
+ };
+
+ etcd = {
+ servers = mkOption {
+ description = "List of etcd servers.";
+ default = ["http://127.0.0.1:2379"];
+ type = types.listOf types.str;
+ };
+
+ keyFile = mkOption {
+ description = "Etcd key file.";
+ default = null;
+ type = types.nullOr types.path;
+ };
+
+ certFile = mkOption {
+ description = "Etcd cert file.";
+ default = null;
+ type = types.nullOr types.path;
+ };
+
+ caFile = mkOption {
+ description = "Etcd ca file.";
+ default = top.caFile;
+ type = types.nullOr types.path;
+ };
+ };
+
+ extraOpts = mkOption {
+ description = "Kubernetes apiserver extra command line options.";
+ default = "";
+ type = str;
+ };
+
+ extraSANs = mkOption {
+ description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
+ default = [];
+ type = listOf str;
+ };
+
+ featureGates = mkOption {
+ description = "List set of feature gates";
+ default = top.featureGates;
+ type = listOf str;
+ };
+
+ insecureBindAddress = mkOption {
+ description = "The IP address on which to serve the --insecure-port.";
+ default = "127.0.0.1";
+ type = str;
+ };
+
+ insecurePort = mkOption {
+ description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
+ default = 0;
+ type = int;
+ };
+
+ kubeletClientCaFile = mkOption {
+ description = "Path to a cert file for connecting to kubelet.";
+ default = top.caFile;
+ type = nullOr path;
+ };
+
+ kubeletClientCertFile = mkOption {
+ description = "Client certificate to use for connections to kubelet.";
+ default = null;
+ type = nullOr path;
+ };
+
+ kubeletClientKeyFile = mkOption {
+ description = "Key to use for connections to kubelet.";
+ default = null;
+ type = nullOr path;
+ };
+
+ kubeletHttps = mkOption {
+ description = "Whether to use https for connections to kubelet.";
+ default = true;
+ type = bool;
+ };
+
+ preferredAddressTypes = mkOption {
+ description = "List of the preferred NodeAddressTypes to use for kubelet connections.";
+ type = nullOr str;
+ default = null;
+ };
+
+ proxyClientCertFile = mkOption {
+ description = "Client certificate to use for connections to proxy.";
+ default = null;
+ type = nullOr path;
+ };
+
+ proxyClientKeyFile = mkOption {
+ description = "Key to use for connections to proxy.";
+ default = null;
+ type = nullOr path;
+ };
+
+ runtimeConfig = mkOption {
+ description = ''
+ Api runtime configuration. See
+ <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
+ '';
+ default = "authentication.k8s.io/v1beta1=true";
+ example = "api/all=false,api/v1=true";
+ type = str;
+ };
+
+ storageBackend = mkOption {
+ description = ''
+ Kubernetes apiserver storage backend.
+ '';
+ default = "etcd3";
+ type = enum ["etcd2" "etcd3"];
+ };
+
+ securePort = mkOption {
+ description = "Kubernetes apiserver secure port.";
+ default = 6443;
+ type = int;
+ };
+
+ serviceAccountKeyFile = mkOption {
+ description = ''
+ Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
+ used to verify ServiceAccount tokens. By default tls private key file
+ is used.
+ '';
+ default = null;
+ type = nullOr path;
+ };
+
+ serviceClusterIpRange = mkOption {
+ description = ''
+ A CIDR notation IP range from which to assign service cluster IPs.
+ This must not overlap with any IP ranges assigned to nodes for pods.
+ '';
+ default = "10.0.0.0/24";
+ type = str;
+ };
+
+ tlsCertFile = mkOption {
+ description = "Kubernetes apiserver certificate file.";
+ default = null;
+ type = nullOr path;
+ };
+
+ tlsKeyFile = mkOption {
+ description = "Kubernetes apiserver private key file.";
+ default = null;
+ type = nullOr path;
+ };
+
+ tokenAuthFile = mkOption {
+ description = ''
+ Kubernetes apiserver token authentication file. See
+ <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+ '';
+ default = null;
+ type = nullOr path;
+ };
+
+ verbosity = mkOption {
+ description = ''
+ Optional glog verbosity level for logging statements. See
+ <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+ '';
+ default = null;
+ type = nullOr int;
+ };
+
+ webhookConfig = mkOption {
+ description = ''
+ Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+ See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+ '';
+ default = null;
+ type = nullOr path;
+ };
+
+ };
+
+
+ ###### implementation
+ config = mkMerge [
+
+ (mkIf cfg.enable {
+ systemd.services.kube-apiserver = {
+ description = "Kubernetes APIServer Service";
+ wantedBy = [ "kubernetes.target" ];
+ after = [ "network.target" ];
+ serviceConfig = {
+ Slice = "kubernetes.slice";
+ ExecStart = ''${top.package}/bin/kube-apiserver \
+ --allow-privileged=${boolToString cfg.allowPrivileged} \
+ --authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
+ ${optionalString (elem "ABAC" cfg.authorizationMode)
+ "--authorization-policy-file=${
+ pkgs.writeText "kube-auth-policy.jsonl"
+ (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
+ }"
+ } \
+ ${optionalString (elem "Webhook" cfg.authorizationMode)
+ "--authorization-webhook-config-file=${cfg.webhookConfig}"
+ } \
+ --bind-address=${cfg.bindAddress} \
+ ${optionalString (cfg.advertiseAddress != null)
+ "--advertise-address=${cfg.advertiseAddress}"} \
+ ${optionalString (cfg.clientCaFile != null)
+ "--client-ca-file=${cfg.clientCaFile}"} \
+ --disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
+ --enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
+ --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
+ ${optionalString (cfg.etcd.caFile != null)
+ "--etcd-cafile=${cfg.etcd.caFile}"} \
+ ${optionalString (cfg.etcd.certFile != null)
+ "--etcd-certfile=${cfg.etcd.certFile}"} \
+ ${optionalString (cfg.etcd.keyFile != null)
+ "--etcd-keyfile=${cfg.etcd.keyFile}"} \
+ ${optionalString (cfg.featureGates != [])
+ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+ ${optionalString (cfg.basicAuthFile != null)
+ "--basic-auth-file=${cfg.basicAuthFile}"} \
+ --kubelet-https=${boolToString cfg.kubeletHttps} \
+ ${optionalString (cfg.kubeletClientCaFile != null)
+ "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
+ ${optionalString (cfg.kubeletClientCertFile != null)
+ "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
+ ${optionalString (cfg.kubeletClientKeyFile != null)
+ "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
+ ${optionalString (cfg.preferredAddressTypes != null)
+ "--kubelet-preferred-address-types=${cfg.preferredAddressTypes}"} \
+ ${optionalString (cfg.proxyClientCertFile != null)
+ "--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
+ ${optionalString (cfg.proxyClientKeyFile != null)
+ "--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
+ --insecure-bind-address=${cfg.insecureBindAddress} \
+ --insecure-port=${toString cfg.insecurePort} \
+ ${optionalString (cfg.runtimeConfig != "")
+ "--runtime-config=${cfg.runtimeConfig}"} \
+ --secure-port=${toString cfg.securePort} \
+ ${optionalString (cfg.serviceAccountKeyFile!=null)
+ "--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
+ --service-cluster-ip-range=${cfg.serviceClusterIpRange} \
+ --storage-backend=${cfg.storageBackend} \
+ ${optionalString (cfg.tlsCertFile != null)
+ "--tls-cert-file=${cfg.tlsCertFile}"} \
+ ${optionalString (cfg.tlsKeyFile != null)
+ "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+ ${optionalString (cfg.tokenAuthFile != null)
+ "--token-auth-file=${cfg.tokenAuthFile}"} \
+ ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+ ${cfg.extraOpts}
+ '';
+ WorkingDirectory = top.dataDir;
+ User = "kubernetes";
+ Group = "kubernetes";
+ AmbientCapabilities = "cap_net_bind_service";
+ Restart = "on-failure";
+ RestartSec = 5;
+ };
+ };
+
+ services.etcd = {
+ clientCertAuth = mkDefault true;
+ peerClientCertAuth = mkDefault true;
+ listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
+ listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
+ advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
+ initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
+ name = mkDefault top.masterAddress;
+ initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
+ };
+
+ services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
+
+ apiserver-kubelet-api-admin-crb = {
+ apiVersion = "rbac.authorization.k8s.io/v1";
+ kind = "ClusterRoleBinding";
+ metadata = {
+ name = "system:kube-apiserver:kubelet-api-admin";
+ };
+ roleRef = {
+ apiGroup = "rbac.authorization.k8s.io";
+ kind = "ClusterRole";
+ name = "system:kubelet-api-admin";
+ };
+ subjects = [{
+ kind = "User";
+ name = "system:kube-apiserver";
+ }];
+ };
+
+ };
+
+ services.kubernetes.pki.certs = with top.lib; {
+ apiServer = mkCert {
+ name = "kube-apiserver";
+ CN = "kubernetes";
+ hosts = [
+ "kubernetes.default.svc"
+ "kubernetes.default.svc.${top.addons.dns.clusterDomain}"
+ cfg.advertiseAddress
+ top.masterAddress
+ apiserverServiceIP
+ "127.0.0.1"
+ ] ++ cfg.extraSANs;
+ action = "systemctl restart kube-apiserver.service";
+ };
+ apiserverProxyClient = mkCert {
+ name = "kube-apiserver-proxy-client";
+ CN = "front-proxy-client";
+ action = "systemctl restart kube-apiserver.service";
+ };
+ apiserverKubeletClient = mkCert {
+ name = "kube-apiserver-kubelet-client";
+ CN = "system:kube-apiserver";
+ action = "systemctl restart kube-apiserver.service";
+ };
+ apiserverEtcdClient = mkCert {
+ name = "kube-apiserver-etcd-client";
+ CN = "etcd-client";
+ action = "systemctl restart kube-apiserver.service";
+ };
+ clusterAdmin = mkCert {
+ name = "cluster-admin";
+ CN = "cluster-admin";
+ fields = {
+ O = "system:masters";
+ };
+ privateKeyOwner = "root";
+ };
+ etcd = mkCert {
+ name = "etcd";
+ CN = top.masterAddress;
+ hosts = [
+ "etcd.local"
+ "etcd.${top.addons.dns.clusterDomain}"
+ top.masterAddress
+ cfg.advertiseAddress
+ ];
+ privateKeyOwner = "etcd";
+ action = "systemctl restart etcd.service";
+ };
+ };
+
+ })
+
+ ];
+
+}
diff --git a/cluster/machines/modules/vendor/controller-manager.nix b/cluster/machines/modules/vendor/controller-manager.nix
new file mode 100644
index 0000000..cd0dc03
--- /dev/null
+++ b/cluster/machines/modules/vendor/controller-manager.nix
@@ -0,0 +1,171 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ top = config.services.kubernetes;
+ cfg = top.controllerManager;
+in
+{
+ imports = [
+ (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
+ (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
+ ];
+
+ ###### interface
+ options.services.kubernetes.controllerManager = with lib.types; {
+
+ allocateNodeCIDRs = mkOption {
+ description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
+ default = true;
+ type = bool;
+ };
+
+ bindAddress = mkOption {
+ description = "Kubernetes controller manager listening address.";
+ default = "127.0.0.1";
+ type = str;
+ };
+
+ clusterCidr = mkOption {
+ description = "Kubernetes CIDR Range for Pods in cluster.";
+ default = top.clusterCidr;
+ type = str;
+ };
+
+ enable = mkEnableOption "Kubernetes controller manager";
+
+ extraOpts = mkOption {
+ description = "Kubernetes controller manager extra command line options.";
+ default = "";
+ type = str;
+ };
+
+ featureGates = mkOption {
+ description = "List set of feature gates";
+ default = top.featureGates;
+ type = listOf str;
+ };
+
+ insecurePort = mkOption {
+ description = "Kubernetes controller manager insecure listening port.";
+ default = 0;
+ type = int;
+ };
+
+ kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
+
+ leaderElect = mkOption {
+ description = "Whether to start leader election before executing main loop.";
+ type = bool;
+ default = true;
+ };
+
+ rootCaFile = mkOption {
+ description = ''
+ Kubernetes controller manager certificate authority file included in
+ service account's token secret.
+ '';
+ default = top.caFile;
+ type = nullOr path;
+ };
+
+ securePort = mkOption {
+ description = "Kubernetes controller manager secure listening port.";
+ default = 10252;
+ type = int;
+ };
+
+ serviceAccountKeyFile = mkOption {
+ description = ''
+ Kubernetes controller manager PEM-encoded private RSA key file used to
+ sign service account tokens
+ '';
+ default = null;
+ type = nullOr path;
+ };
+
+ tlsCertFile = mkOption {
+ description = "Kubernetes controller-manager certificate file.";
+ default = null;
+ type = nullOr path;
+ };
+
+ tlsKeyFile = mkOption {
+ description = "Kubernetes controller-manager private key file.";
+ default = null;
+ type = nullOr path;
+ };
+
+ verbosity = mkOption {
+ description = ''
+ Optional glog verbosity level for logging statements. See
+ <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+ '';
+ default = null;
+ type = nullOr int;
+ };
+
+ };
+
+ ###### implementation
+ config = mkIf cfg.enable {
+ systemd.services.kube-controller-manager = {
+ description = "Kubernetes Controller Manager Service";
+ wantedBy = [ "kubernetes.target" ];
+ after = [ "kube-apiserver.service" ];
+ serviceConfig = {
+ RestartSec = "30s";
+ Restart = "on-failure";
+ Slice = "kubernetes.slice";
+ ExecStart = ''${top.package}/bin/kube-controller-manager \
+ --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
+ --bind-address=${cfg.bindAddress} \
+ ${optionalString (cfg.clusterCidr!=null)
+ "--cluster-cidr=${cfg.clusterCidr}"} \
+ ${optionalString (cfg.featureGates != [])
+ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+ --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
+ --leader-elect=${boolToString cfg.leaderElect} \
+ ${optionalString (cfg.rootCaFile!=null)
+ "--root-ca-file=${cfg.rootCaFile}"} \
+ --port=${toString cfg.insecurePort} \
+ --secure-port=${toString cfg.securePort} \
+ ${optionalString (cfg.serviceAccountKeyFile!=null)
+ "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
+ ${optionalString (cfg.tlsCertFile!=null)
+ "--tls-cert-file=${cfg.tlsCertFile}"} \
+ ${optionalString (cfg.tlsKeyFile!=null)
+ "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+ ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
+ "--use-service-account-credentials"} \
+ ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+ ${cfg.extraOpts}
+ '';
+ WorkingDirectory = top.dataDir;
+ User = "kubernetes";
+ Group = "kubernetes";
+ };
+ path = top.path;
+ };
+
+ services.kubernetes.pki.certs = with top.lib; {
+ controllerManager = mkCert {
+ name = "kube-controller-manager";
+ CN = "kube-controller-manager";
+ action = "systemctl restart kube-controller-manager.service";
+ };
+ controllerManagerClient = mkCert {
+ name = "kube-controller-manager-client";
+ CN = "system:kube-controller-manager";
+ action = "systemctl restart kube-controller-manager.service";
+ };
+ };
+
+ services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
+ };
+}
diff --git a/cluster/machines/modules/vendor/default.nix b/cluster/machines/modules/vendor/default.nix
new file mode 100644
index 0000000..9194544
--- /dev/null
+++ b/cluster/machines/modules/vendor/default.nix
@@ -0,0 +1,291 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.services.kubernetes;
+
+ mkKubeConfig = name: conf: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
+ apiVersion = "v1";
+ kind = "Config";
+ clusters = [{
+ name = "local";
+ cluster.certificate-authority = conf.caFile or cfg.caFile;
+ cluster.server = conf.server;
+ }];
+ users = [{
+ inherit name;
+ user = {
+ client-certificate = conf.certFile;
+ client-key = conf.keyFile;
+ };
+ }];
+ contexts = [{
+ context = {
+ cluster = "local";
+ user = name;
+ };
+ current-context = "local";
+ }];
+ });
+
+ caCert = secret "ca";
+
+ etcdEndpoints = ["https://${cfg.masterAddress}:2379"];
+
+ mkCert = { name, CN, hosts ? [], fields ? {}, action ? "",
+ privateKeyOwner ? "kubernetes" }: rec {
+ inherit name caCert CN hosts fields action;
+ cert = secret name;
+ key = secret "${name}-key";
+ privateKeyOptions = {
+ owner = privateKeyOwner;
+ group = "nogroup";
+ mode = "0600";
+ path = key;
+ };
+ };
+
+ secret = name: "${cfg.secretsPath}/${name}.pem";
+
+ mkKubeConfigOptions = prefix: {
+ server = mkOption {
+ description = "${prefix} kube-apiserver server address.";
+ type = types.str;
+ };
+
+ caFile = mkOption {
+ description = "${prefix} certificate authority file used to connect to kube-apiserver.";
+ type = types.nullOr types.path;
+ default = cfg.caFile;
+ };
+
+ certFile = mkOption {
+ description = "${prefix} client certificate file used to connect to kube-apiserver.";
+ type = types.nullOr types.path;
+ default = null;
+ };
+
+ keyFile = mkOption {
+ description = "${prefix} client key file used to connect to kube-apiserver.";
+ type = types.nullOr types.path;
+ default = null;
+ };
+ };
+in {
+
+ imports = [
+ (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
+ ];
+
+ ###### interface
+
+ options.services.kubernetes = {
+ roles = mkOption {
+ description = ''
+ Kubernetes role that this machine should take.
+
+ Master role will enable etcd, apiserver, scheduler, controller manager
+ addon manager, flannel and proxy services.
+ Node role will enable flannel, docker, kubelet and proxy services.
+ '';
+ default = [];
+ type = types.listOf (types.enum ["master" "node"]);
+ };
+
+ package = mkOption {
+ description = "Kubernetes package to use.";
+ type = types.package;
+ default = pkgs.kubernetes;
+ defaultText = "pkgs.kubernetes";
+ };
+
+ kubeconfig = mkKubeConfigOptions "Default kubeconfig";
+
+ apiserverAddress = mkOption {
+ description = ''
+ Clusterwide accessible address for the kubernetes apiserver,
+ including protocol and optional port.
+ '';
+ example = "https://kubernetes-apiserver.example.com:6443";
+ type = types.str;
+ };
+
+ caFile = mkOption {
+ description = "Default kubernetes certificate authority";
+ type = types.nullOr types.path;
+ default = null;
+ };
+
+ dataDir = mkOption {
+ description = "Kubernetes root directory for managing kubelet files.";
+ default = "/var/lib/kubernetes";
+ type = types.path;
+ };
+
+ easyCerts = mkOption {
+ description = "Automatically setup x509 certificates and keys for the entire cluster.";
+ default = false;
+ type = types.bool;
+ };
+
+ featureGates = mkOption {
+ description = "List set of feature gates.";
+ default = [];
+ type = types.listOf types.str;
+ };
+
+ masterAddress = mkOption {
+ description = "Clusterwide available network address or hostname for the kubernetes master server.";
+ example = "master.example.com";
+ type = types.str;
+ };
+
+ path = mkOption {
+ description = "Packages added to the services' PATH environment variable. Both the bin and sbin subdirectories of each package are added.";
+ type = types.listOf types.package;
+ default = [];
+ };
+
+ clusterCidr = mkOption {
+ description = "Kubernetes controller manager and proxy CIDR Range for Pods in cluster.";
+ default = "10.1.0.0/16";
+ type = types.nullOr types.str;
+ };
+
+ lib = mkOption {
+ description = "Common functions for the kubernetes modules.";
+ default = {
+ inherit mkCert;
+ inherit mkKubeConfig;
+ inherit mkKubeConfigOptions;
+ };
+ type = types.attrs;
+ };
+
+ secretsPath = mkOption {
+ description = "Default location for kubernetes secrets. Not a store location.";
+ type = types.path;
+ default = cfg.dataDir + "/secrets";
+ };
+ };
+
+ ###### implementation
+
+ config = mkMerge [
+
+ (mkIf cfg.easyCerts {
+ services.kubernetes.pki.enable = mkDefault true;
+ services.kubernetes.caFile = caCert;
+ })
+
+ (mkIf (elem "master" cfg.roles) {
+ services.kubernetes.apiserver.enable = mkDefault true;
+ services.kubernetes.scheduler.enable = mkDefault true;
+ services.kubernetes.controllerManager.enable = mkDefault true;
+ services.kubernetes.addonManager.enable = mkDefault true;
+ services.kubernetes.proxy.enable = mkDefault true;
+ services.etcd.enable = true; # Cannot mkDefault because of flannel default options
+ services.kubernetes.kubelet = {
+ enable = mkDefault true;
+ taints = mkIf (!(elem "node" cfg.roles)) {
+ master = {
+ key = "node-role.kubernetes.io/master";
+ value = "true";
+ effect = "NoSchedule";
+ };
+ };
+ };
+ })
+
+
+ (mkIf (all (el: el == "master") cfg.roles) {
+ # if this node is only a master make it unschedulable by default
+ services.kubernetes.kubelet.unschedulable = mkDefault true;
+ })
+
+ (mkIf (elem "node" cfg.roles) {
+ services.kubernetes.kubelet.enable = mkDefault true;
+ services.kubernetes.proxy.enable = mkDefault true;
+ })
+
+ # Using "services.kubernetes.roles" will automatically enable easyCerts and flannel
+ (mkIf (cfg.roles != []) {
+ services.kubernetes.flannel.enable = mkDefault true;
+ services.flannel.etcd.endpoints = mkDefault etcdEndpoints;
+ services.kubernetes.easyCerts = mkDefault true;
+ })
+
+ (mkIf cfg.apiserver.enable {
+ services.kubernetes.pki.etcClusterAdminKubeconfig = mkDefault "kubernetes/cluster-admin.kubeconfig";
+ services.kubernetes.apiserver.etcd.servers = mkDefault etcdEndpoints;
+ })
+
+ (mkIf cfg.kubelet.enable {
+ virtualisation.docker = {
+ enable = mkDefault true;
+
+ # kubernetes needs access to logs
+ logDriver = mkDefault "json-file";
+
+ # iptables must be disabled for kubernetes
+ extraOptions = "--iptables=false --ip-masq=false";
+ };
+ })
+
+ (mkIf (cfg.apiserver.enable || cfg.controllerManager.enable) {
+ services.kubernetes.pki.certs = {
+ serviceAccount = mkCert {
+ name = "service-account";
+ CN = "system:service-account-signer";
+ action = ''
+ systemctl reload \
+ kube-apiserver.service \
+ kube-controller-manager.service
+ '';
+ };
+ };
+ })
+
+ (mkIf (
+ cfg.apiserver.enable ||
+ cfg.scheduler.enable ||
+ cfg.controllerManager.enable ||
+ cfg.kubelet.enable ||
+ cfg.proxy.enable ||
+ cfg.addonManager.enable
+ ) {
+ systemd.targets.kubernetes = {
+ description = "Kubernetes";
+ wantedBy = [ "multi-user.target" ];
+ };
+
+ systemd.tmpfiles.rules = [
+ "d /opt/cni/bin 0755 root root -"
+ "d /run/kubernetes 0755 kubernetes kubernetes -"
+ "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
+ ];
+
+ users.users.kubernetes = {
+ uid = config.ids.uids.kubernetes;
+ description = "Kubernetes user";
+ extraGroups = [ "docker" ];
+ group = "kubernetes";
+ home = cfg.dataDir;
+ createHome = true;
+ };
+ users.groups.kubernetes.gid = config.ids.gids.kubernetes;
+
+ # dns addon is enabled by default
+ services.kubernetes.addons.dns.enable = mkDefault true;
+
+ services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
+ then cfg.apiserver.advertiseAddress
+ else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
+ })
+ ];
+}
diff --git a/cluster/machines/modules/kubelet.nix b/cluster/machines/modules/vendor/kubelet.nix
similarity index 97%
rename from cluster/machines/modules/kubelet.nix
rename to cluster/machines/modules/vendor/kubelet.nix
index 79da4cb..f896695 100644
--- a/cluster/machines/modules/kubelet.nix
+++ b/cluster/machines/modules/vendor/kubelet.nix
@@ -1,3 +1,7 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+#
# Same as upstream kubelet.nix module from nixpkgs, but with the following
# changes:
# - cni tunables nuked and replaced with static host dirs, so that calico
@@ -226,7 +230,7 @@
TasksMax = "infinity";
OOMScoreAdjust = "-999";
- ExecStart = "${pkgs.containerd}/bin/containerd -c ${./containerd.toml}";
+ ExecStart = "${pkgs.containerd}/bin/containerd -c ${../containerd.toml}";
};
};
diff --git a/cluster/machines/modules/vendor/pki.nix b/cluster/machines/modules/vendor/pki.nix
new file mode 100644
index 0000000..fa40e88
--- /dev/null
+++ b/cluster/machines/modules/vendor/pki.nix
@@ -0,0 +1,405 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ top = config.services.kubernetes;
+ cfg = top.pki;
+
+ csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
+ key = {
+ algo = "rsa";
+ size = 2048;
+ };
+ names = singleton cfg.caSpec;
+ });
+
+ csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
+ key = {
+ algo = "rsa";
+ size = 2048;
+ };
+ CN = top.masterAddress;
+ hosts = [top.masterAddress] ++ cfg.cfsslAPIExtraSANs;
+ });
+
+ cfsslAPITokenBaseName = "apitoken.secret";
+ cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
+ certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
+ cfsslAPITokenLength = 32;
+
+ clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+ top.lib.mkKubeConfig "cluster-admin" {
+ server = top.apiserverAddress;
+ certFile = cert;
+ keyFile = key;
+ };
+
+ remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
+in
+{
+ ###### interface
+ options.services.kubernetes.pki = with lib.types; {
+
+ enable = mkEnableOption "easyCert issuer service";
+
+ certs = mkOption {
+ description = "List of certificate specs to feed to cert generator.";
+ default = {};
+ type = attrs;
+ };
+
+ genCfsslCACert = mkOption {
+ description = ''
+ Whether to automatically generate cfssl CA certificate and key,
+ if they don't exist.
+ '';
+ default = true;
+ type = bool;
+ };
+
+ genCfsslAPICerts = mkOption {
+ description = ''
+ Whether to automatically generate cfssl API webserver TLS cert and key,
+ if they don't exist.
+ '';
+ default = true;
+ type = bool;
+ };
+
+ cfsslAPIExtraSANs = mkOption {
+ description = ''
+ Extra x509 Subject Alternative Names to be added to the cfssl API webserver TLS cert.
+ '';
+ default = [];
+ example = [ "subdomain.example.com" ];
+ type = listOf str;
+ };
+
+ genCfsslAPIToken = mkOption {
+ description = ''
+ Whether to automatically generate cfssl API-token secret,
+ if it doesn't exist.
+ '';
+ default = true;
+ type = bool;
+ };
+
+ pkiTrustOnBootstrap = mkOption {
+ description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
+ default = true;
+ type = bool;
+ };
+
+ caCertPathPrefix = mkOption {
+ description = ''
+ Path prefix for the CA certificate to be used for cfssl signing.
+ Suffixes ".pem" and "-key.pem" will be automatically appended for
+ the public and private keys respectively.
+ '';
+ default = "${config.services.cfssl.dataDir}/ca";
+ type = str;
+ };
+
+ caSpec = mkOption {
+ description = "Certificate specification for the auto-generated CAcert.";
+ default = {
+ CN = "kubernetes-cluster-ca";
+ O = "NixOS";
+ OU = "services.kubernetes.pki.caSpec";
+ L = "auto-generated";
+ };
+ type = attrs;
+ };
+
+ etcClusterAdminKubeconfig = mkOption {
+ description = ''
+ Symlink a kubeconfig with cluster-admin privileges to environment path
+ (/etc/<path>).
+ '';
+ default = null;
+ type = nullOr str;
+ };
+
+ };
+
+ ###### implementation
+ config = mkIf cfg.enable
+ (let
+ cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
+ cfsslCert = "${cfsslCertPathPrefix}.pem";
+ cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+ in
+ {
+
+ services.cfssl = mkIf (top.apiserver.enable) {
+ enable = true;
+ address = "0.0.0.0";
+ tlsCert = cfsslCert;
+ tlsKey = cfsslKey;
+ configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
+ signing = {
+ profiles = {
+ default = {
+ usages = ["digital signature"];
+ auth_key = "default";
+ expiry = "720h";
+ };
+ };
+ };
+ auth_keys = {
+ default = {
+ type = "standard";
+ key = "file:${cfsslAPITokenPath}";
+ };
+ };
+ }));
+ };
+
+ systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
+ (concatStringsSep "\n" [
+ "set -e"
+ (optionalString cfg.genCfsslCACert ''
+ if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
+ ${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
+ ${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
+ fi
+ '')
+ (optionalString cfg.genCfsslAPICerts ''
+ if [ ! -f "${dataDir}/cfssl.pem" ]; then
+ ${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
+ ${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
+ fi
+ '')
+ (optionalString cfg.genCfsslAPIToken ''
+ if [ ! -f "${cfsslAPITokenPath}" ]; then
+ head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
+ fi
+ chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
+ '')]);
+
+ systemd.services.kube-certmgr-bootstrap = {
+ description = "Kubernetes certmgr bootstrapper";
+ wantedBy = [ "certmgr.service" ];
+ after = [ "cfssl.target" ];
+ script = concatStringsSep "\n" [''
+ set -e
+
+ # If there's a cfssl (cert issuer) running locally, then don't rely on user to
+ # manually paste it in place. Just symlink.
+ # otherwise, create the target file, ready for users to insert the token
+
+ if [ -f "${cfsslAPITokenPath}" ]; then
+ ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
+ else
+ touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
+ fi
+ ''
+ (optionalString (cfg.pkiTrustOnBootstrap) ''
+ if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
+ ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
+ ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
+ fi
+ '')
+ ];
+ serviceConfig = {
+ RestartSec = "10s";
+ Restart = "on-failure";
+ };
+ };
+
+ services.certmgr = {
+ enable = true;
+ package = pkgs.certmgr-selfsigned;
+ svcManager = "command";
+ specs =
+ let
+ mkSpec = _: cert: {
+ inherit (cert) action;
+ authority = {
+ inherit remote;
+ file.path = cert.caCert;
+ root_ca = cert.caCert;
+ profile = "default";
+ auth_key_file = certmgrAPITokenPath;
+ };
+ certificate = {
+ path = cert.cert;
+ };
+ private_key = cert.privateKeyOptions;
+ request = {
+ hosts = [cert.CN] ++ cert.hosts;
+ inherit (cert) CN;
+ key = {
+ algo = "rsa";
+ size = 2048;
+ };
+ names = [ cert.fields ];
+ };
+ };
+ in
+ mapAttrs mkSpec cfg.certs;
+ };
+
+ #TODO: Get rid of kube-addon-manager in the future for the following reasons
+ # - it is basically just a shell script wrapped around kubectl
+ # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+ # - it is designed to be used with k8s system components only
+ # - it would be better with a more Nix-oriented way of managing addons
+ systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
+ environment.KUBECONFIG = with cfg.certs.addonManager;
+ top.lib.mkKubeConfig "addon-manager" {
+ server = top.apiserverAddress;
+ certFile = cert;
+ keyFile = key;
+ };
+ }
+
+ (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
+ serviceConfig.PermissionsStartOnly = true;
+ preStart = with pkgs;
+ let
+ files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+ top.addonManager.bootstrapAddons;
+ in
+ ''
+ export KUBECONFIG=${clusterAdminKubeconfig}
+ ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
+ '';
+ })]);
+
+ environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
+ clusterAdminKubeconfig;
+
+ environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
+ (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
+ set -e
+ exec 1>&2
+
+ if [ $# -gt 0 ]; then
+ echo "Usage: $(basename $0)"
+ echo ""
+ echo "No args. Apitoken must be provided on stdin."
+ echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
+ exit 1
+ fi
+
+ if [ $(id -u) != 0 ]; then
+ echo "Run as root please."
+ exit 1
+ fi
+
+ read -r token
+ if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
+ echo "Token must be of length ${toString cfsslAPITokenLength}."
+ exit 1
+ fi
+
+ echo $token > ${certmgrAPITokenPath}
+ chmod 600 ${certmgrAPITokenPath}
+
+ echo "Restarting certmgr..." >&1
+ systemctl restart certmgr
+
+ echo "Waiting for certs to appear..." >&1
+
+ ${optionalString top.kubelet.enable ''
+ while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
+ echo "Restarting kubelet..." >&1
+ systemctl restart kubelet
+ ''}
+
+ ${optionalString top.proxy.enable ''
+ while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
+ echo "Restarting kube-proxy..." >&1
+ systemctl restart kube-proxy
+ ''}
+
+ ${optionalString top.flannel.enable ''
+ while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
+ echo "Restarting flannel..." >&1
+ systemctl restart flannel
+ ''}
+
+ echo "Node joined successfully"
+ '')];
+
+ # isolate etcd on loopback at the master node
+ # easyCerts doesn't support multimaster clusters anyway atm.
+ services.etcd = with cfg.certs.etcd; {
+ listenClientUrls = ["https://127.0.0.1:2379"];
+ listenPeerUrls = ["https://127.0.0.1:2380"];
+ advertiseClientUrls = ["https://etcd.local:2379"];
+ initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
+ initialAdvertisePeerUrls = ["https://etcd.local:2380"];
+ certFile = mkDefault cert;
+ keyFile = mkDefault key;
+ trustedCaFile = mkDefault caCert;
+ };
+ networking.extraHosts = mkIf (config.services.etcd.enable) ''
+ 127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
+ '';
+
+ services.flannel = with cfg.certs.flannelClient; {
+ kubeconfig = top.lib.mkKubeConfig "flannel" {
+ server = top.apiserverAddress;
+ certFile = cert;
+ keyFile = key;
+ };
+ };
+
+ services.kubernetes = {
+
+ apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
+ etcd = with cfg.certs.apiserverEtcdClient; {
+ servers = ["https://etcd.local:2379"];
+ certFile = mkDefault cert;
+ keyFile = mkDefault key;
+ caFile = mkDefault caCert;
+ };
+ clientCaFile = mkDefault caCert;
+ tlsCertFile = mkDefault cert;
+ tlsKeyFile = mkDefault key;
+ serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
+ kubeletClientCaFile = mkDefault caCert;
+ kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
+ kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
+ proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
+ proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
+ });
+ controllerManager = mkIf top.controllerManager.enable {
+ serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
+ rootCaFile = cfg.certs.controllerManagerClient.caCert;
+ kubeconfig = with cfg.certs.controllerManagerClient; {
+ certFile = mkDefault cert;
+ keyFile = mkDefault key;
+ };
+ };
+ scheduler = mkIf top.scheduler.enable {
+ kubeconfig = with cfg.certs.schedulerClient; {
+ certFile = mkDefault cert;
+ keyFile = mkDefault key;
+ };
+ };
+ kubelet = mkIf top.kubelet.enable {
+ clientCaFile = mkDefault cfg.certs.kubelet.caCert;
+ tlsCertFile = mkDefault cfg.certs.kubelet.cert;
+ tlsKeyFile = mkDefault cfg.certs.kubelet.key;
+ kubeconfig = with cfg.certs.kubeletClient; {
+ certFile = mkDefault cert;
+ keyFile = mkDefault key;
+ };
+ };
+ proxy = mkIf top.proxy.enable {
+ kubeconfig = with cfg.certs.kubeProxyClient; {
+ certFile = mkDefault cert;
+ keyFile = mkDefault key;
+ };
+ };
+ };
+ });
+}
diff --git a/cluster/machines/modules/vendor/proxy.nix b/cluster/machines/modules/vendor/proxy.nix
new file mode 100644
index 0000000..84eabec
--- /dev/null
+++ b/cluster/machines/modules/vendor/proxy.nix
@@ -0,0 +1,98 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ top = config.services.kubernetes;
+ cfg = top.proxy;
+in
+{
+ imports = [
+ (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
+ ];
+
+ ###### interface
+ options.services.kubernetes.proxy = with lib.types; {
+
+ bindAddress = mkOption {
+ description = "Kubernetes proxy listening address.";
+ default = "0.0.0.0";
+ type = str;
+ };
+
+ enable = mkEnableOption "Kubernetes proxy";
+
+ extraOpts = mkOption {
+ description = "Kubernetes proxy extra command line options.";
+ default = "";
+ type = str;
+ };
+
+ featureGates = mkOption {
+ description = "List of feature gates to enable.";
+ default = top.featureGates;
+ type = listOf str;
+ };
+
+ hostname = mkOption {
+ description = "Kubernetes proxy hostname override.";
+ default = config.networking.hostName;
+ type = str;
+ };
+
+ kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
+
+ verbosity = mkOption {
+ description = ''
+ Optional glog verbosity level for logging statements. See
+ <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+ '';
+ default = null;
+ type = nullOr int;
+ };
+
+ };
+
+ ###### implementation
+ config = mkIf cfg.enable {
+ systemd.services.kube-proxy = {
+ description = "Kubernetes Proxy Service";
+ wantedBy = [ "kubernetes.target" ];
+ after = [ "kube-apiserver.service" ];
+ path = with pkgs; [ iptables conntrack_tools ];
+ serviceConfig = {
+ Slice = "kubernetes.slice";
+ ExecStart = ''${top.package}/bin/kube-proxy \
+ --bind-address=${cfg.bindAddress} \
+ ${optionalString (top.clusterCidr!=null)
+ "--cluster-cidr=${top.clusterCidr}"} \
+ ${optionalString (cfg.featureGates != [])
+ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+ --hostname-override=${cfg.hostname} \
+ --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
+ ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+ ${cfg.extraOpts}
+ '';
+ WorkingDirectory = top.dataDir;
+ Restart = "on-failure";
+ RestartSec = 5;
+ };
+ };
+
+ services.kubernetes.proxy.hostname = with config.networking; mkDefault hostName;
+
+ services.kubernetes.pki.certs = {
+ kubeProxyClient = top.lib.mkCert {
+ name = "kube-proxy-client";
+ CN = "system:kube-proxy";
+ action = "systemctl restart kube-proxy.service";
+ };
+ };
+
+ services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
+ };
+}
diff --git a/cluster/machines/modules/vendor/scheduler.nix b/cluster/machines/modules/vendor/scheduler.nix
new file mode 100644
index 0000000..b2d45a2
--- /dev/null
+++ b/cluster/machines/modules/vendor/scheduler.nix
@@ -0,0 +1,98 @@
+# Vendored from nixpkgs git 44ad80ab1036c5cc83ada4bfa451dac9939f2a10
+# Copyright (c) 2003-2023 Eelco Dolstra and the Nixpkgs/NixOS contributors
+# SPDX-License-Identifier: MIT
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ top = config.services.kubernetes;
+ cfg = top.scheduler;
+in
+{
+ ###### interface
+ options.services.kubernetes.scheduler = with lib.types; {
+
+ address = mkOption {
+ description = "Kubernetes scheduler listening address.";
+ default = "127.0.0.1";
+ type = str;
+ };
+
+ enable = mkEnableOption "Kubernetes scheduler";
+
+ extraOpts = mkOption {
+ description = "Kubernetes scheduler extra command line options.";
+ default = "";
+ type = str;
+ };
+
+ featureGates = mkOption {
+ description = "List of feature gates to enable.";
+ default = top.featureGates;
+ type = listOf str;
+ };
+
+ kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
+
+ leaderElect = mkOption {
+ description = "Whether to start leader election before executing main loop.";
+ type = bool;
+ default = true;
+ };
+
+ port = mkOption {
+ description = "Kubernetes scheduler listening port.";
+ default = 10251;
+ type = int;
+ };
+
+ verbosity = mkOption {
+ description = ''
+ Optional glog verbosity level for logging statements. See
+ <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+ '';
+ default = null;
+ type = nullOr int;
+ };
+
+ };
+
+ ###### implementation
+ config = mkIf cfg.enable {
+ systemd.services.kube-scheduler = {
+ description = "Kubernetes Scheduler Service";
+ wantedBy = [ "kubernetes.target" ];
+ after = [ "kube-apiserver.service" ];
+ serviceConfig = {
+ Slice = "kubernetes.slice";
+ ExecStart = ''${top.package}/bin/kube-scheduler \
+ --address=${cfg.address} \
+ ${optionalString (cfg.featureGates != [])
+ "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+ --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
+ --leader-elect=${boolToString cfg.leaderElect} \
+ --port=${toString cfg.port} \
+ ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+ ${cfg.extraOpts}
+ '';
+ WorkingDirectory = top.dataDir;
+ User = "kubernetes";
+ Group = "kubernetes";
+ Restart = "on-failure";
+ RestartSec = 5;
+ };
+ };
+
+ services.kubernetes.pki.certs = {
+ schedulerClient = top.lib.mkCert {
+ name = "kube-scheduler-client";
+ CN = "system:kube-scheduler";
+ action = "systemctl restart kube-scheduler.service";
+ };
+ };
+
+ services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
+ };
+}