// Deploys identd, an ident protocol (RFC1413) service on all cluster nodes.
//
// See //cluster/identd for more information about the service and how it
// works.
//
// Deployment notes:
// - We run the service within the host network namespace, as that's the only
//   way to reliably route traffic destined for a given node into a service
//   running on that node. We could use NodePort services, but low-numbered
//   ports are denied by kubelet with our current configuration.
// - We pass the host containerd socket to the service, and the service runs
//   as root. This means that the service effectively has root on the machine.
//   The fact that it's directly exposed to the Internet isn't great, but the
//   only alternative seems to be to split this up into two services - one
//   responsible for maintaining the state of translations/pods (running as
//   root), another responding to the actual queries. Considering this is Go
//   and we run a bare minimum of code (no mirko default service), this is
//   probably good enough and not worth the extra complexity.
// - The service has to be able to resolve the IP of the node on which it's
//   running, to know what address to look up in the conntrack table.
//   Currently it does so by connecting to the k8s API, getting information
//   about its own pod, and then getting the node's IP address from there.
//   This might be overly complicated, as perhaps we could:
//   1. figure out the server IP from the incoming ident connections, or
//   2. find a better way to retrieve the node IP via the downward API
//      (possibly the status.hostIP fieldRef).
//   TODO(q3k): figure this out.
local kube = import "../../../kube/kube.libsonnet";
local policies = import "../../../kube/policies.libsonnet";
{
    Environment: {
        local env = self,
        local cfg = env.cfg,
        cfg:: {
            namespace: "identd",
            image: "registry.k0.hswaw.net/q3k/identd:315532800-9f29c14dad77036d0820948139b5aee3fac25d59",
        },
        namespace: kube.Namespace(cfg.namespace),
        local ns = self.namespace,
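        // Allow 'insecure' workloads in this namespace - identd uses
        // hostNetwork, hostPath mounts and runs as root (see notes above).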
        allowInsecure: policies.AllowNamespaceInsecure(cfg.namespace),
        sa: ns.Contain(kube.ServiceAccount("identd")),
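        // Minimal RBAC: identd only needs to read pods, so that it can look up
        // its own pod and resolve the IP of the node it's running on.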
        role: ns.Contain(kube.Role("access-pod-info")) {
            rules: [
                {
                    apiGroups: [""],
                    resources: ["pods"],
                    verbs: ["get"],
                },
            ],
        },
        rb: ns.Contain(kube.RoleBinding("identd")) {
            roleRef_: env.role,
            subjects_: [env.sa],
        },
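        // Run one identd instance per node. With hostNetwork it binds directly
        // to port 113 on the node, so ident queries destined for a given node
        // are answered by the instance running on that node.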
        daemonset: ns.Contain(kube.DaemonSet("identd")) {
            spec+: {
                template+: {
                    spec+: {
                        serviceAccountName: env.sa.metadata.name,
                        hostNetwork: true,
                        containers_: {
                            default: kube.Container("default") {
                                image: cfg.image,
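                                // Expose the pod's own name and namespace via
                                // the downward API; identd uses these (passed
                                // as flags below) to find its node's IP via
                                // the k8s API.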
                                env_: {
                                    POD_NAME: kube.FieldRef("metadata.name"),
                                    POD_NAMESPACE: kube.FieldRef("metadata.namespace"),
                                },
                                command: [
                                    "/cluster/identd/identd",
                                    "-identd_listen", "0.0.0.0:113",
                                    "-identd_conntrack_proc", "/host/conntrack",
                                    "-identd_containerd_socket", "/host/containerd.sock",
                                    // Used by the service to figure out which
                                    // node it's running on.
                                    "-identd_pod_name", "$(POD_NAME)",
                                    "-identd_pod_namespace", "$(POD_NAMESPACE)",
                                    "-logtostderr",
                                ],
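                                // Host state needed to map connections to
                                // pods: the kernel's conntrack table and the
                                // containerd socket (backed by hostPath
                                // volumes below).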
                                volumeMounts_: {
                                    conntrack: { mountPath: "/host/conntrack", },
                                    containerd: { mountPath: "/host/containerd.sock", },
                                },
                                resources: {
                                    requests: {
                                        cpu: "0.1",
                                        memory: "64M",
                                    },
                                    // Allow identd to spike to 1 CPU. This
                                    // makes it faster when fetching
                                    // information from containerd.
                                    limits: {
                                        cpu: "1",
                                        memory: "256M",
                                    },
                                },
                            },
                        },
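                        // Host paths backing the mounts above.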
                        volumes_: {
                            conntrack: kube.HostPathVolume("/proc/net/nf_conntrack", "File"),
                            containerd: kube.HostPathVolume("/var/run/containerd/containerd.sock", "Socket"),
                        },
                    },
                },
            },
        },
    },
}