smsgw: productionize, implement kube/mirko
This productionizes smsgw.
We also add some jsonnet machinery to provide a unified service for Go
micro/mirkoservices.
This machinery provides all the nice stuff:
- a deployment
- a service for all your types of ports
- TLS certificates for HSPKI
We also update and test hspki for a new name scheme.
Change-Id: I292d00f858144903cbc8fe0c1c26eb1180d636bc
diff --git a/kube/mirko.libsonnet b/kube/mirko.libsonnet
new file mode 100644
index 0000000..55ff90e
--- /dev/null
+++ b/kube/mirko.libsonnet
@@ -0,0 +1,216 @@
+# Mirko, an abstraction layer for hscloud kubernetes services.
+
+local kube = import "kube.libsonnet";
+
+{
+ Environment(name): {
+ local env = self,
+ local cfg = env.cfg,
+ cfg:: {
+ name: name,
+ namespace: cfg.name,
+ },
+
+ namespace: kube.Namespace(cfg.namespace),
+
+ components: {}, // type: mirko.Component
+
+ // Currently hardcoded!
+ // This might end up being something passed part of kubecfg evaluation,
+ // when we get to supporting multiple/federated clusters.
+ // For now, this is goog enough.
+ pkiRealm:: "hswaw.net",
+ pkiClusterFQDN:: "k0.hswaw.net",
+
+ // Generate an ingress if we have any public ports.
+ publicHTTPPorts:: std.flattenArrays([
+ [
+ {
+ local component = env.components[c],
+
+ service: component.svc,
+ port: component.cfg.ports.publicHTTP[p].port,
+ dns: component.cfg.ports.publicHTTP[p].dns,
+ }
+ for p in std.objectFields(env.components[c].cfg.ports.publicHTTP)
+ ]
+ for c in std.objectFields(env.components)
+ ]),
+
+ ingress: if std.length(env.publicHTTPPorts) > 0 then kube.Ingress("mirko-public") {
+ metadata+: {
+ namespace: env.cfg.namespace,
+ labels: {
+ "app.kubernetes.io/name": cfg.name,
+ "app.kubernetes.io/managed-by": "kubecfg-mirko",
+ "app.kubernetes.io/component": cfg.name,
+ "mirko.hscloud.hackerspace.pl/environment": env.cfg.name,
+ "mirko.hscloud.hackerspace.pl/component": "mirko-public-ingress",
+ },
+ annotations+: {
+ "kubernetes.io/tls-acme": "true",
+ "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+ },
+ },
+ spec+: {
+ tls: [
+ {
+ hosts: [p.dns for p in env.publicHTTPPorts],
+ secretName: "mirko-public-tls",
+ },
+ ],
+ rules: [
+ {
+ host: p.dns,
+ http: {
+ paths: [
+ { path: "/", backend: { serviceName: p.service.metadata.name, servicePort: p.port }},
+ ],
+ },
+ }
+ for p in env.publicHTTPPorts
+ ],
+ },
+ } else {}
+ },
+
+ Component(env, name): {
+ local component = self,
+ local cfg = component.cfg,
+
+ makeName(suffix):: "%s%s%s" % [cfg.prefix, cfg.name, suffix],
+
+ metadata:: {
+ namespace: env.cfg.namespace,
+ labels: {
+ "app.kubernetes.io/name": env.cfg.name,
+ "app.kubernetes.io/managed-by": "kubecfg-mirko",
+ "app.kubernetes.io/component": cfg.name,
+ "mirko.hscloud.hackerspace.pl/environment": env.cfg.name,
+ "mirko.hscloud.hackerspace.pl/component": cfg.name,
+ },
+ },
+
+
+ # Tunables for users.
+ cfg:: {
+ name: name,
+
+ prefix:: "",
+ image:: env.image,
+ volumes:: {},
+ containers:: {
+ main: cfg.container,
+ },
+ container:: error "container(s) must be set",
+ ports:: {
+ publicHTTP: {}, // name -> { port: no, dns: fqdn }
+ grpc: { main: 4200 }, // name -> port no
+ },
+
+ },
+
+ allPorts:: {
+ ['grpc-' + p]: cfg.ports.grpc[p]
+ for p in std.objectFields(cfg.ports.grpc)
+ } + {
+ ['pubhttp-' + p] : cfg.ports.publicHTTP[p].port
+ for p in std.objectFields(cfg.ports.publicHTTP)
+ },
+
+ Container(name):: kube.Container(component.makeName(name)) {
+ image: cfg.image,
+ volumeMounts_: {
+ pki: { mountPath: "/mnt/pki" },
+ },
+ ports_: {
+ [p]: { containerPort: component.allPorts[p] }
+ for p in std.objectFields(component.allPorts)
+ },
+ resources: {
+ requests: {
+ cpu: "25m",
+ memory: "64Mi",
+ },
+ limits: {
+ cpu: "500m",
+ memory: "128Mi",
+ },
+ },
+ },
+
+ GoContainer(name, binary):: component.Container(name) {
+ command: [
+ binary,
+ "-hspki_realm", env.pkiRealm,
+ "-hspki_cluster", env.pkiClusterFQDN,
+ "-hspki_tls_ca_path", "/mnt/pki/ca.crt",
+ "-hspki_tls_certificate_path", "/mnt/pki/tls.crt",
+ "-hspki_tls_key_path", "/mnt/pki/tls.key",
+ "-logtostderr",
+ "-listen_address", "0.0.0.0:4200",
+ ],
+ },
+
+ deployment: kube.Deployment(component.makeName("-main")) {
+ metadata+: component.metadata,
+ spec+: {
+ template+: {
+ spec+: {
+ volumes_: {
+ pki: {
+ secret: { secretName: component.pki.cert.spec.secretName },
+ },
+ } + cfg.volumes,
+ containers_: cfg.containers,
+
+ serviceAccountName: component.sa.metadata.name,
+ },
+ },
+ },
+ },
+
+ svc: kube.Service(component.makeName("")) { // No suffix, name part of DNS entry.
+ metadata+: component.metadata,
+ target_pod:: component.deployment.spec.template,
+ spec+: {
+ ports: [
+ {
+ name: p,
+ port: component.allPorts[p],
+ targetPort: component.allPorts[p],
+ }
+ for p in std.objectFields(component.allPorts)
+ ],
+ },
+ },
+
+ sa: kube.ServiceAccount(component.makeName("-main")) {
+ metadata+: component.metadata,
+ },
+
+ pki: {
+ cert: kube.Certificate(component.makeName("-cert")) {
+ metadata+: component.metadata,
+
+ spec: {
+ secretName: component.makeName("-cert"),
+ duration: "35040h0m0s", // 4 years
+ issuerRef: {
+ // Contract with cluster/lib/pki.libsonnet.
+ name: "pki-ca",
+ kind: "ClusterIssuer",
+ },
+ commonName: "%s.%s.svc.%s" % [component.svc.metadata.name, component.svc.metadata.namespace, env.pkiClusterFQDN ],
+ dnsNames: [
+ "%s" % [component.svc.metadata.name ],
+ "%s.%s" % [component.svc.metadata.name, component.svc.metadata.namespace ],
+ "%s.%s.svc" % [component.svc.metadata.name, component.svc.metadata.namespace ],
+ "%s.%s.svc.cluster.local" % [component.svc.metadata.name, component.svc.metadata.namespace ],
+ "%s.%s.svc.%s" % [component.svc.metadata.name, component.svc.metadata.namespace, env.pkiClusterFQDN ],
+ ],
+ },
+ },
+ },
+ },
+}