// Deploys identd, an ident protocol (RFC 1413) service on all cluster nodes.
//
// See //cluster/identd for more information about the service and how it
// works.
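//
// An ident exchange is a single request/response pair: the querier sends a
// "<port-on-queried-host>, <port-on-querier>" pair and receives the owner
// of the matching connection (illustrative values):
//   C: 6193, 23
//   S: 6193, 23 : USERID : UNIX : stjohns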
//
// Deployment notes:
// - We run the service within the host network namespace, as that's the only
//   way to reliably route traffic destined for a given node into a service
//   running on that node. We could use NodePort services, but low-numbered
//   ports are denied by kubelet with our current configuration.
// - We pass the host containerd socket to the service, and the service runs
//   as root. This means that the service basically has root on the machine.
//   The fact that it's directly exposed to the Internet isn't great, but the
//   only alternative seems to be to split this up into two services - one
//   responsible for maintaining the state of translations/pods (running as
//   root), another responding to the actual queries. Considering this is Go
//   and we run a bare minimum of code (no mirko default service), this is
//   probably good enough and not worth the extra complexity.
// - The service has to be able to resolve the node IP on which it's running,
//   to know what address to look up in the conntrack table. Currently it does
//   so by connecting to the k8s API, getting information about its own pod,
//   and then getting the node's IP address from there.
//   This might be overly complicated, as perhaps we can
//   1. figure out the server IP from the incoming ident connections, or
//   2. find a better way to retrieve the node IP via the downward API.
// TODO(q3k): figure this out.
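//
// For option 2, a sketch (untested): the downward API can expose the node's
// IP directly via status.hostIP, which would also make the pods/get RBAC
// below unnecessary, e.g.:
//
//   env_: {
//     NODE_IP: kube.FieldRef("status.hostIP"),
//   },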

local kube = import "../../../kube/kube.libsonnet";
local policies = import "../../../kube/policies.libsonnet";

{
    Environment: {
        local env = self,
        local cfg = env.cfg,
        cfg:: {
            namespace: "identd",
            image: "registry.k0.hswaw.net/q3k/identd:315532800-9f29c14dad77036d0820948139b5aee3fac25d59",
        },

        namespace: kube.Namespace(cfg.namespace),
        local ns = self.namespace,

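        // The pod below uses hostNetwork and hostPath mounts, which default
        // cluster policies deny; explicitly allow 'insecure' pods in this
        // namespace.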
        allowInsecure: policies.AllowNamespaceInsecure(cfg.namespace),

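        // identd resolves the IP of the node it runs on by fetching its own
        // Pod object from the Kubernetes API (see deployment notes above), so
        // its ServiceAccount needs 'get' on pods.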
        sa: ns.Contain(kube.ServiceAccount("identd")),
        role: ns.Contain(kube.Role("access-pod-info")) {
            rules: [
                {
                    apiGroups: [""],
                    resources: ["pods"],
                    verbs: ["get"],
                },
            ],
        },
        rb: ns.Contain(kube.RoleBinding("identd")) {
            roleRef_: env.role,
            subjects_: [env.sa],
        },

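        // One identd instance per node: a DaemonSet in the host network
        // namespace, so port 113 is bound directly on each node's interfaces.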
        daemonset: ns.Contain(kube.DaemonSet("identd")) {
            spec+: {
                template+: {
                    spec+: {
                        serviceAccountName: env.sa.metadata.name,
                        hostNetwork: true,
                        containers_: {
                            default: kube.Container("default") {
                                image: cfg.image,
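                                // POD_NAME/POD_NAMESPACE are injected via the
                                // downward API; kubelet expands the $(...)
                                // references in the command below.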
                                env_: {
                                    POD_NAME: kube.FieldRef("metadata.name"),
                                    POD_NAMESPACE: kube.FieldRef("metadata.namespace"),
                                },
                                command: [
                                    "/cluster/identd/identd",
                                    "-identd_listen", "0.0.0.0:113",
                                    "-identd_conntrack_proc", "/host/conntrack",
                                    "-identd_containerd_socket", "/host/containerd.sock",
                                    // Used by the service to figure out which
                                    // node it's running on.
                                    "-identd_pod_name", "$(POD_NAME)",
                                    "-identd_pod_namespace", "$(POD_NAMESPACE)",
                                    "-logtostderr",
                                ],
                                volumeMounts_: {
                                    conntrack: { mountPath: "/host/conntrack", },
                                    containerd: { mountPath: "/host/containerd.sock", },
                                },
                                resources: {
                                    requests: {
                                        cpu: "0.1",
                                        memory: "64M",
                                    },
                                    // Allow identd to spike to 1 CPU. This
                                    // makes it faster when fetching
                                    // information from containerd.
                                    limits: {
                                        cpu: "1",
                                        memory: "256M",
                                    },
                                },
                            },
                        },
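                        // Host paths backing the /host/... mounts above: the
                        // kernel conntrack table used to resolve translated
                        // connections, and the containerd socket used to map
                        // them back to pods.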
                        volumes_: {
                            conntrack: kube.HostPathVolume("/proc/net/nf_conntrack", "File"),
                            containerd: kube.HostPathVolume("/var/run/containerd/containerd.sock", "Socket"),
                        },
                    },
                },
            },
        },
    },
}