# blob: 5203afd7a631f4483ac27375b5e81a93a1d4ea8f [file] [log] [blame]
# Mirko, an abstraction layer for hscloud kubernetes services.
local kube = import "kube.libsonnet";
{
// Environment(name) instantiates a mirko environment: a dedicated
// Kubernetes namespace (named after the environment by default) plus a
// single shared Ingress exposing the public HTTP ports of every
// Component registered in `components`.
Environment(name): {
local env = self,
local cfg = env.cfg,
// User-facing tunables; override via cfg+:: { ... }.
cfg:: {
name: name,
// Namespace defaults to the environment name.
namespace: cfg.name,
},
namespace: kube.Namespace(cfg.namespace),
// Registry of Components living in this environment; populated by users.
components: {}, // type: mirko.Component
// Currently hardcoded!
// This might end up being something passed part of kubecfg evaluation,
// when we get to supporting multiple/federated clusters.
// For now, this is good enough.
pkiRealm:: "hswaw.net",
pkiClusterFQDN:: "k0.hswaw.net",
// Generate an ingress if we have any public ports.
// publicHTTPPorts flattens every component's cfg.ports.publicHTTP map into
// one list of { service, port, dns, setHeaders, extraPaths } records,
// consumed by the `ingress` object below.
publicHTTPPorts:: std.flattenArrays([
[
{
local component = env.components[c],
service: component.svc,
port: component.cfg.ports.publicHTTP[p].port,
dns: component.cfg.ports.publicHTTP[p].dns,
// Extra headers to set.
// BUG(q3k): these headers are applied to all components in the environment!
// We should be splitting up ingresses where necessary to combat this.
setHeaders: [],
// Extra paths to add to ingress. These are bare HTTPIngressPaths.
extraPaths: component.cfg.extraPaths,
}
for p in std.objectFields(env.components[c].cfg.ports.publicHTTP)
]
for c in std.objectFields(env.components)
]),
// Shared Ingress for all public HTTP ports; only emitted when at least
// one component exposes a publicHTTP port, otherwise an empty object.
ingress: if std.length(env.publicHTTPPorts) > 0 then kube.Ingress("mirko-public") {
metadata+: {
namespace: env.cfg.namespace,
labels: {
"app.kubernetes.io/name": cfg.name,
"app.kubernetes.io/managed-by": "kubecfg-mirko",
"app.kubernetes.io/component": cfg.name,
"mirko.hscloud.hackerspace.pl/environment": env.cfg.name,
"mirko.hscloud.hackerspace.pl/component": "mirko-public-ingress",
},
annotations+: {
// Request ACME certificates through cert-manager's cluster issuer.
"kubernetes.io/tls-acme": "true",
"certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
// Computed fields: these annotations only appear when configured.
[if env.ingressServerSnippet != null then "nginx.ingress.kubernetes.io/server-snippet"]: env.ingressServerSnippet,
[if std.length(env.extraHeaders) > 0 then "nginx.ingress.kubernetes.io/configuration-snippet"]:
std.join("\n", ["proxy_set_header %s;" % [h] for h in env.extraHeaders]),
},
},
spec+: {
// One TLS secret covering every public DNS name in this environment.
tls: [
{
hosts: [p.dns for p in env.publicHTTPPorts],
secretName: "mirko-public-tls",
},
],
// One rule per public port, routing "/" (plus any extraPaths) to the
// owning component's Service.
rules: [
{
host: p.dns,
http: {
paths: [
{ path: "/", backend: { serviceName: p.service.metadata.name, servicePort: p.port }},
] + p.extraPaths,
},
}
for p in env.publicHTTPPorts
],
},
} else {},
// Nginx Ingress Controller server configuration snippet to add.
ingressServerSnippet:: null,
// Extra request headers to add to ingress
// Aggregated from every component port's optional `setHeaders` field
// (ports without one contribute nothing).
extraHeaders:: std.flattenArrays([
std.flattenArrays([
local portc = env.components[c].cfg.ports.publicHTTP[p];
if std.objectHas(portc, "setHeaders") then portc.setHeaders else []
for p in std.objectFields(env.components[c].cfg.ports.publicHTTP)
])
for c in std.objectFields(env.components)
]),
},
// Component(env, name) instantiates one mirko service inside the given
// Environment: a Deployment, a Service exposing all declared ports, a
// ServiceAccount, and a cert-manager Certificate providing hspki mTLS
// material mounted into every container at /mnt/pki.
Component(env, name): {
local component = self,
local cfg = component.cfg,
// Name helpers: makeName yields a namespace-local name; makeNameGlobal
// prefixes the environment namespace for cluster-unique names.
makeName(suffix):: "%s%s%s" % [cfg.prefix, cfg.name, suffix],
makeNameGlobal(suffix):: "%s-%s" % [env.cfg.namespace, component.makeName(suffix)],
// Common metadata (namespace + labels) stamped onto every object below.
metadata:: {
namespace: env.cfg.namespace,
labels: {
"app.kubernetes.io/name": env.cfg.name,
"app.kubernetes.io/managed-by": "kubecfg-mirko",
"app.kubernetes.io/component": cfg.name,
"mirko.hscloud.hackerspace.pl/environment": env.cfg.name,
"mirko.hscloud.hackerspace.pl/component": cfg.name,
},
},
# Tunables for users.
cfg:: {
name: name,
prefix:: "",
// Default image is inherited from the environment; override per-component.
image:: env.image,
volumes:: {},
// By default a single container named "main" built from cfg.container;
// at least cfg.container (or a full cfg.containers override) must be set.
containers:: {
main: cfg.container,
},
nodeSelector: null,
securityContext: {},
container:: error "container(s) must be set",
initContainer:: null,
ports:: {
publicHTTP: {}, // name -> { port: no, dns: fqdn }
grpc: { main: 4200 }, // name -> port no
},
extraPaths:: [],
},
// Flattened name -> port-number map of all declared ports; names are
// prefixed ("grpc-", "pubhttp-") to avoid collisions between the maps.
allPorts:: {
['grpc-' + p]: cfg.ports.grpc[p]
for p in std.objectFields(cfg.ports.grpc)
} + {
['pubhttp-' + p] : cfg.ports.publicHTTP[p].port
for p in std.objectFields(cfg.ports.publicHTTP)
},
// Base container: component image, hspki material mounted at /mnt/pki,
// every declared port exposed, and conservative default resources.
Container(name):: kube.Container(component.makeName(name)) {
image: cfg.image,
volumeMounts_: {
pki: { mountPath: "/mnt/pki" },
},
ports_: {
[p]: { containerPort: component.allPorts[p] }
for p in std.objectFields(component.allPorts)
},
resources: {
requests: {
cpu: "25m",
memory: "64Mi",
},
limits: {
cpu: "500m",
memory: "128Mi",
},
},
},
// Container running a Go binary wired with the standard hspki flags,
// pointing at the certificate material mounted by Container above and
// listening on the default gRPC port (4200, matching cfg.ports.grpc).
GoContainer(name, binary):: component.Container(name) {
command: [
binary,
"-hspki_realm", env.pkiRealm,
"-hspki_cluster", env.pkiClusterFQDN,
"-hspki_tls_ca_path", "/mnt/pki/ca.crt",
"-hspki_tls_certificate_path", "/mnt/pki/tls.crt",
"-hspki_tls_key_path", "/mnt/pki/tls.key",
"-logtostderr",
"-listen_address", "0.0.0.0:4200",
],
},
// Main Deployment: user-configured containers plus the pki secret volume,
// running under the dedicated ServiceAccount below.
deployment: kube.Deployment(component.makeName("-main")) {
metadata+: component.metadata,
spec+: {
template+: {
spec+: {
volumes_: {
pki: {
secret: { secretName: component.pki.cert.spec.secretName },
},
} + cfg.volumes,
containers_: cfg.containers,
// Computed field: only set initContainers when one is configured.
[if cfg.initContainer != null then "initContainers"]: [cfg.initContainer],
nodeSelector: cfg.nodeSelector,
serviceAccountName: component.sa.metadata.name,
securityContext: cfg.securityContext,
},
},
},
},
// Service exposing every declared port of the deployment.
svc: kube.Service(component.makeName("")) { // No suffix, name part of DNS entry.
metadata+: component.metadata,
target_pod:: component.deployment.spec.template,
spec+: {
ports: [
{
name: p,
port: component.allPorts[p],
targetPort: component.allPorts[p],
}
for p in std.objectFields(component.allPorts)
],
},
},
// Dedicated ServiceAccount for the deployment's pods.
sa: kube.ServiceAccount(component.makeName("-main")) {
metadata+: component.metadata,
},
// hspki client/server certificate issued by the cluster's pki-ca; the
// resulting secret is mounted into containers at /mnt/pki.
pki: {
cert: kube.Certificate(component.makeName("-cert")) {
metadata+: component.metadata,
spec: {
secretName: component.makeName("-cert"),
duration: "35040h0m0s", // 4 years
issuerRef: {
// Contract with cluster/lib/pki.libsonnet.
name: "pki-ca",
kind: "ClusterIssuer",
},
commonName: "%s.%s.svc.%s" % [component.svc.metadata.name, component.svc.metadata.namespace, env.pkiClusterFQDN ],
// SANs cover every form of the Service's DNS name, from bare
// service name up to the cluster-qualified FQDN.
dnsNames: [
"%s" % [component.svc.metadata.name ],
"%s.%s" % [component.svc.metadata.name, component.svc.metadata.namespace ],
"%s.%s.svc" % [component.svc.metadata.name, component.svc.metadata.namespace ],
"%s.%s.svc.cluster.local" % [component.svc.metadata.name, component.svc.metadata.namespace ],
"%s.%s.svc.%s" % [component.svc.metadata.name, component.svc.metadata.namespace, env.pkiClusterFQDN ],
],
},
},
},
},
}