# Deploy a per-cluster CoreDNS
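#
# Example use (a minimal sketch; the "coredns.libsonnet" filename and the
# overridden values are assumptions, not part of this file):
#
#   local coredns = import "coredns.libsonnet";
#
#   {
#       coredns: coredns.Environment {
#           cfg+:: {
#               clusterIP: "10.0.0.10",
#           },
#       },
#   }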
local kube = import "../../../kube/kube.libsonnet";

{
    Environment: {
        local env = self,
        local cfg = env.cfg,
        cfg:: {
            image: "coredns/coredns:1.3.0",
            namespace: "kube-system",
            upstream_server: "185.236.240.1",
            cluster_domains: [
                "cluster.local",
            ],
            reverse_cidrs: ["in-addr.arpa", "ip6.arpa"],
            clusterIP: "10.10.12.254",
        },
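
        // ServiceAccount for the CoreDNS pods, plus the ClusterRole/Binding
        // that lets them list and watch Services, Endpoints, Pods and
        // Namespaces (mirroring the upstream CoreDNS RBAC defaults).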
        sa: kube.ServiceAccount("coredns") {
            metadata+: {
                namespace: cfg.namespace,
            },
        },

        cr: kube.ClusterRole("system:coredns") {
            metadata+: {
                labels: {
                    "kubernetes.io/bootstrapping": "rbac-defaults",
                },
            },
            rules: [
                {
                    apiGroups: [""],
                    resources: ["endpoints", "services", "pods", "namespaces"],
                    verbs: ["list", "watch"],
                },
                {
                    apiGroups: [""],
                    resources: ["nodes"],
                    verbs: ["get"],
                },
            ],
        },

        crb: kube.ClusterRoleBinding("system:coredns") {
            metadata+: {
                labels: {
                    "kubernetes.io/bootstrapping": "rbac-defaults",
                },
                annotations+: {
                    "rbac.authorization.kubernetes.io/autoupdate": "true",
                },
            },
            roleRef: {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "ClusterRole",
                name: env.cr.metadata.name,
            },
            subjects: [
                {
                    kind: "ServiceAccount",
                    name: env.sa.metadata.name,
                    namespace: env.sa.metadata.namespace,
                },
            ],
        },
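
        // The Corefile, templated from cfg. With the defaults above, the
        // kubernetes plugin line renders as
        // "kubernetes cluster.local in-addr.arpa ip6.arpa" and the proxy
        // line as "proxy . 185.236.240.1".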
        cm: kube.ConfigMap("coredns") {
            local map = self,
            upstream_server:: cfg.upstream_server,
            cluster_domains:: std.join(" ", cfg.cluster_domains),
            reverse_cidrs:: std.join(" ", cfg.reverse_cidrs),
            metadata+: {
                namespace: cfg.namespace,
            },
            data: {
                Corefile: |||
                    .:53 {
                        log
                        errors
                        health
                        kubernetes %s %s {
                            pods insecure
                            upstream
                            fallthrough in-addr.arpa ip6.arpa
                        }
                        rewrite name suffix .svc.k0.hswaw.net .svc.cluster.local
                        prometheus :9153
                        proxy . %s
                        cache 30
                        loop
                        reload
                        loadbalance
                    }
                ||| % [map.cluster_domains, map.reverse_cidrs, map.upstream_server],
            },
        },
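
        // Two CoreDNS replicas behind a RollingUpdate strategy. The
        // underscore fields (volumes_, containers_, ports_, volumeMounts_)
        // are kube.libsonnet conveniences: name-keyed objects that the
        // library expands into the corresponding Kubernetes lists.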
        deployment: kube.Deployment("coredns") {
            metadata+: {
                namespace: cfg.namespace,
                labels+: {
                    "k8s-app": "coredns",
                },
            },
            spec+: {
                replicas: 2,
                strategy: {
                    type: "RollingUpdate",
                    rollingUpdate: { maxUnavailable: 1 },
                },
                template+: {
                    spec+: {
                        serviceAccountName: env.sa.metadata.name,
                        tolerations: [
                            { key: "CriticalAddonsOnly", operator: "Exists" },
                        ],
                        dnsPolicy: "Default",
                        volumes_: {
                            config: {
                                configMap: {
                                    name: env.cm.metadata.name,
                                    items: [ { key: "Corefile", path: "Corefile" } ],
                                },
                            },
                        },
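                        // -conf points at the Corefile mounted from the
                        // ConfigMap above; the health plugin serves /health
                        // on :8080 (its default) for the liveness probe.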
                        containers_: {
                            coredns: kube.Container("coredns") {
                                local container = self,
                                image: cfg.image,
                                args: [
                                    "-conf", "%s/Corefile" % container.volumeMounts[0].mountPath,
                                ],
                                imagePullPolicy: "IfNotPresent",
                                resources: {
                                    limits: { memory: "170Mi" },
                                    requests: { memory: "70Mi", cpu: "100m" },
                                },
                                volumeMounts_: {
                                    config: {
                                        mountPath: "/etc/coredns",
                                    },
                                },
                                ports_: {
                                    dns: {
                                        containerPort: 53,
                                        protocol: "UDP",
                                    },
                                    "dns-tcp": {
                                        containerPort: 53,
                                        protocol: "TCP",
                                    },
                                    metrics: {
                                        containerPort: 9153,
                                        protocol: "TCP",
                                    },
                                },
                                securityContext: {
                                    allowPrivilegeEscalation: false,
                                    capabilities: {
                                        add: ["NET_BIND_SERVICE"],
                                        drop: ["all"],
                                    },
                                    readOnlyRootFilesystem: true,
                                },
                                livenessProbe: {
                                    httpGet: {
                                        path: "/health",
                                        port: 8080,
                                        scheme: "HTTP",
                                    },
                                    initialDelaySeconds: 60,
                                    timeoutSeconds: 5,
                                    successThreshold: 1,
                                    failureThreshold: 5,
                                },
                            },
                        },
                    },
                },
            },
        },
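
        // Service with a fixed clusterIP (what kubelets are pointed at as
        // --cluster-dns). target_pod appears to be a kube.libsonnet helper
        // that derives the selector from the Deployment's pod template; the
        // ports are mirrored from the container's ports.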
        svc: kube.Service("coredns") {
            local svc = self,
            metadata+: {
                namespace: cfg.namespace,
            },
            target_pod: env.deployment.spec.template,
            spec+: {
                ports: [
                    { name: p.name, port: p.containerPort, protocol: p.protocol }
                    for p in svc.target_pod.spec.containers[0].ports
                ],
                clusterIP: cfg.clusterIP,
            },
        },
    },
}