# blob: 463087e9f746f6ec496fb6ebbe811e27adafdf07 [file] [log] [blame]
# This is a Jsonnet library for Kubernetes-related things.
local kube = import '../../../kube/kube.libsonnet';
{
local shells = self,
local cfg = shells.cfg,
# Shared configuration parameters used by the other fields in this object.
# The double colon "::" makes the field hidden, so it does not appear
# in the rendered output.
cfg:: {
namespace: "personal-vuko",
appName: "three-shell-system",
domain: "shells.vuko.pl",
# NOTE(review): nginx_tag appears unused — nginx_image below pins its own
# tag ("stable-alpine"); confirm against other files before removing.
nginx_tag: "latest",
nginx_image: "nginxinc/nginx-unprivileged:stable-alpine",
storageClassName: "waw-hdd-redundant-2",
# Default CPU/memory requests and limits, shared by both containers below.
resources: {
requests: {
cpu: "25m",
memory: "50Mi",
},
limits: {
cpu: "100m",
memory: "200Mi",
},
},
},
# Kubernetes namespace personal-${name} for personal usage.
namespace: kube.Namespace(cfg.namespace),
# Helper producing common metadata (namespace plus recommended labels) for a
# given component name; merged into each resource's metadata below.
metadata(component):: {
namespace: cfg.namespace,
labels: {
"app.kubernetes.io/name": cfg.appName,
"app.kubernetes.io/managed-by": "kubecfg",
"app.kubernetes.io/component": component,
},
},
# Component - persistent (non-volatile) storage.
# https://kubernetes.io/docs/concepts/storage/persistent-volumes/
dataVolume: kube.PersistentVolumeClaim("html-data") {
# Override the default PersistentVolumeClaim metadata with the values
# produced by the metadata() helper defined above.
# The "+" sign means merge into (rather than replace) the parent field.
metadata+: shells.metadata("html-data"),
spec+: {
storageClassName: cfg.storageClassName,
# ReadWriteMany: the volume can be mounted by multiple containers.
accessModes: [ "ReadWriteMany" ],
resources: {
requests: {
# amount of storage space: 500MiB
storage: "500Mi",
},
},
},
},
# Deployment declaring the pod that runs the nginx (static HTML) and sftp
# (upload) containers side by side, so they can share the html volume.
# https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
deployment: kube.Deployment("shells") {
metadata+: shells.metadata("shells"),
spec+: {
replicas: 1,
template+: {
spec+: {
# Names ending with "_" have special meaning in this context; this is
# specified in ../../../kube/kube.upstream.jsonnet:
# volumes_: { key: { ... } } is converted to volumes: [{ name: key, ... }]
volumes_: {
# sftp container host key secrets, saved to kubernetes semi-manually using create-secrets.py
# https://kubernetes.io/docs/concepts/configuration/secret/
host_keys: { secret: { secretName: "shells-ssh-host-key-bd65mg4gbt" } },
# sftp container authorized_keys, saved to kubernetes using command:
# kubectl -n personal-vuko create secret generic shells-ssh-authorized-keys --from-file="authorized_keys=${HOME}/.ssh/id_ed25519.pub"
# defaultMode 256 == 0400 (owner read-only), as sshd requires for key files.
authorized_keys: { secret: { secretName: "shells-ssh-authorized-keys", defaultMode: 256 } },
# To use the PersistentVolumeClaim declared above it must be claimed as a volume.
html: kube.PersistentVolumeClaimVolume(shells.dataVolume),
},
# Containers of the pod; defined together in one deployment they are
# scheduled onto the same node and can share volumes.
containers_: {
shells: kube.Container("nginx") {
image: cfg.nginx_image,
ports_: {
# nginx-unprivileged listens on 8080 (a non-root process cannot
# bind port 80); this must match the Service's targetPort below.
# Fixed: was 80, which did not match the image or the Service.
http: { containerPort: 8080 },
},
resources: cfg.resources,
volumeMounts_: {
html: { mountPath: "/usr/share/nginx/html" },
},
},
sftp: kube.Container("sftp") {
image: "registry.k0.hswaw.net/vuko/hs-shells-sftp:latest",
ports_: {
sftp: { containerPort: 2222 },
},
command: [ "/bin/start" ],
resources: cfg.resources,
securityContext: {
# Specify uid of the user running the command.
# NOTE(review): uid 1 is unusual ("daemon" on most images) — confirm
# this matches the user baked into the hs-shells-sftp image.
runAsUser: 1,
},
volumeMounts_: {
# Here the volumes defined in volumes_ above are mounted.
host_keys: { mountPath: "/etc/ssh/host" },
authorized_keys: { mountPath: "/etc/ssh/auth" },
html: { mountPath: "/data" },
},
},
},
},
},
},
},
# Defining a Service of type LoadBalancer gives you access from the internet.
# Run: kubectl -n personal-${user} get services  to see the assigned IP address.
svc: kube.Service("shells") {
metadata+: shells.metadata("shells"),
target_pod:: shells.deployment.spec.template,
spec+: {
ports: [
# External port 80 forwards to the container's 8080; external 22 to sshd on 2222.
{ name: "http", port: 80, targetPort: 8080, protocol: "TCP" },
{ name: "sftp", port: 22, targetPort: 2222, protocol: "TCP" },
],
type: "LoadBalancer",
# "Local" preserves the client source IP (see Kubernetes service docs).
externalTrafficPolicy: "Local",
},
},
# Ingress creates a VirtualHost on ingress.k0.hswaw.net forwarding http(s)
# requests for your domain to the specified Service port.
ingress: kube.Ingress("frontend") {
metadata+: shells.metadata("frontend") {
annotations+: {
# Request a TLS certificate from Let's Encrypt via cert-manager.
"kubernetes.io/tls-acme": "true",
"certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
},
},
spec+: {
tls: [
{ hosts: [cfg.domain], secretName: "shells-frontend-tls"}
],
rules: [
{
host: cfg.domain,
http: {
paths: [
# Forward everything under "/" to the service's named http port.
{ path: "/", backend: shells.svc.name_port },
],
},
},
],
},
},
}