kube: standardize on a `local top = self` convention
A convention is introduced of placing a `local top = self` declaration at the top of an app/service/component's jsonnet, representing the top-level object. The reasoning is as follows:
- `top` is more universal/unambiguous than `app`
- `top` is usually shorter than $NAME
- a conventional `top` instead of $NAME (coupled with other conventions introduced) makes app jsonnets wonderfully copy-pasteable, aiding in learning and quick building
Change-Id: I7ece83ce7e97021ad98a6abb3500fb9839936811
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/1805
Reviewed-by: q3k <q3k@hackerspace.pl>
diff --git a/app/matrix/lib/cas.libsonnet b/app/matrix/lib/cas.libsonnet
index 4c70b45..8c7ed40 100644
--- a/app/matrix/lib/cas.libsonnet
+++ b/app/matrix/lib/cas.libsonnet
@@ -1,8 +1,8 @@
local kube = import "../../../kube/kube.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
image: error "cfg.image must be set",
@@ -13,7 +13,7 @@
},
ns:: error "ns needs to be a kube.Namespace object",
- local ns = app.ns,
+ local ns = top.ns,
deployment: ns.Contain(kube.Deployment("oauth2-cas-proxy")) {
spec+: {
@@ -44,6 +44,6 @@
},
svc: ns.Contain(kube.Service("oauth2-cas-proxy")) {
- target:: app.deployment,
+ target:: top.deployment,
},
}
diff --git a/app/matrix/lib/coturn.libsonnet b/app/matrix/lib/coturn.libsonnet
index cfb5024..4d1c334 100644
--- a/app/matrix/lib/coturn.libsonnet
+++ b/app/matrix/lib/coturn.libsonnet
@@ -1,8 +1,8 @@
local kube = import "../../../kube/kube.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
image: error "cfg.image must be set",
realm: error "cfg.realm must be set",
@@ -15,7 +15,7 @@
},
ns:: error "ns needs to be provided",
- local ns = app.ns,
+ local ns = top.ns,
configMap: ns.Contain(kube.ConfigMap("coturn")) {
data: {
@@ -71,8 +71,8 @@
template+: {
spec+: {
volumes_: {
- config: kube.ConfigMapVolume(app.configMap),
- data: kube.PersistentVolumeClaimVolume(app.dataVolume),
+ config: kube.ConfigMapVolume(top.configMap),
+ data: kube.PersistentVolumeClaimVolume(top.dataVolume),
},
containers_: {
coturn: kube.Container("coturn") {
@@ -131,7 +131,7 @@
},
svcTCP: ns.Contain(kube.Service("coturn-tcp")) {
- target:: app.deployment,
+ target:: top.deployment,
metadata+: {
annotations+: {
"metallb.universe.tf/allow-shared-ip": "coturn",
@@ -151,7 +151,7 @@
},
svcUDP: ns.Contain(kube.Service("coturn-udp")) {
- target:: app.deployment,
+ target:: top.deployment,
metadata+: {
annotations+: {
"metallb.universe.tf/allow-shared-ip": "coturn",
diff --git a/app/matrix/lib/matrix-ng.libsonnet b/app/matrix/lib/matrix-ng.libsonnet
index 620a8d5..44e97a9 100644
--- a/app/matrix/lib/matrix-ng.libsonnet
+++ b/app/matrix/lib/matrix-ng.libsonnet
@@ -98,8 +98,8 @@
local coturn = import "./coturn.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
namespace: error "cfg.namespace must be set",
# webDomain is the domain name at which element will run
@@ -336,16 +336,16 @@
synapse: synapse {
ns: ns,
- postgres: if cfg.postgres.enable then app.postgres3 else {
+ postgres: if cfg.postgres.enable then top.postgres3 else {
# If not using on-cluster postgres, pass the config postgres object
# as the postgres object into the synapse lib. It's a bit ugly (we
# should have some common 'config' type instead) but it's good
# enough.
cfg: cfg.postgres,
- }, redis: app.redis,
- appservices: app.appservices,
- cfg+: app.cfg {
- image: app.cfg.images.synapse,
+ }, redis: top.redis,
+ appservices: top.appservices,
+ cfg+: top.cfg {
+ image: top.cfg.images.synapse,
macaroonSecretKey: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } },
registrationSharedSecret: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } },
@@ -361,7 +361,7 @@
// until it spits you a registration YAML and you feed that to a secret.
appservices: {},
- ingress: app.namespace.Contain(kube.Ingress("matrix")) {
+ ingress: top.namespace.Contain(kube.Ingress("matrix")) {
metadata+: {
annotations+: {
"kubernetes.io/tls-acme": "true",
@@ -382,19 +382,19 @@
host: cfg.webDomain,
http: {
paths: [
- { path: path, backend: app.synapse.genericWorker.svc.name_port }
- for path in app.synapse.genericWorker.paths
+ { path: path, backend: top.synapse.genericWorker.svc.name_port }
+ for path in top.synapse.genericWorker.paths
] + [
- { path: "/", backend: app.riot.svc.name_port },
- { path: "/_matrix/media/", backend: if cfg.mediaRepo.route then app.mediaRepo.svc.name_port else app.synapse.mediaWorker.svc.name_port },
- { path: "/_matrix/", backend: app.synapse.main.svc.name_port },
+ { path: "/", backend: top.riot.svc.name_port },
+ { path: "/_matrix/media/", backend: if cfg.mediaRepo.route then top.mediaRepo.svc.name_port else top.synapse.mediaWorker.svc.name_port },
+ { path: "/_matrix/", backend: top.synapse.main.svc.name_port },
# Used by OpenID Connect login flow
- { path: "/_synapse/", backend: app.synapse.main.svc.name_port },
+ { path: "/_synapse/", backend: top.synapse.main.svc.name_port },
] + (if cfg.cas.enable then [
- { path: "/_cas", backend: app.cas.svc.name_port },
+ { path: "/_cas", backend: top.cas.svc.name_port },
] else []) + (if cfg.wellKnown then [
- { path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
+ { path: "/.well-known/matrix", backend: top.wellKnown.svc.name_port },
] else [])
},
}
diff --git a/app/matrix/lib/matrix.libsonnet b/app/matrix/lib/matrix.libsonnet
index fef0bd8..c17a934 100644
--- a/app/matrix/lib/matrix.libsonnet
+++ b/app/matrix/lib/matrix.libsonnet
@@ -32,8 +32,8 @@
local postgres = import "../../../kube/postgres.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
namespace: error "cfg.namespace must be set",
# webDomain is the domain name at which element will run
@@ -96,7 +96,7 @@
},
dataVolume: kube.PersistentVolumeClaim("synapse-data-waw3") {
- metadata+: app.metadata("synapse-data"),
+ metadata+: top.metadata("synapse-data"),
storage:: "50Gi",
storageClass:: cfg.storageClassName,
},
@@ -112,7 +112,7 @@
signing_key_path: "/data/%s.signing.key" % [cfg.serverName],
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
- for k in std.objectFields(app.appservices)
+ for k in std.objectFields(top.appservices)
],
} + (if cfg.cas.enable then {
cas_config: {
@@ -123,15 +123,15 @@
} else {}),
synapseConfigMap: kube.ConfigMap("synapse") {
- metadata+: app.metadata("synapse"),
+ metadata+: top.metadata("synapse"),
data: {
- "homeserver.yaml": std.manifestYamlDoc(app.synapseConfig),
+ "homeserver.yaml": std.manifestYamlDoc(top.synapseConfig),
"log.config": importstr "synapse/log.config",
},
},
casDeployment: if cfg.cas.enable then kube.Deployment("oauth2-cas-proxy") {
- metadata+: app.metadata("oauth2-cas-proxy"),
+ metadata+: top.metadata("oauth2-cas-proxy"),
spec+: {
replicas: 1,
template+: {
@@ -160,22 +160,22 @@
},
casSvc: if cfg.cas.enable then kube.Service("oauth2-cas-proxy") {
- metadata+: app.metadata("oauth2-cas-proxy"),
- target:: app.casDeployment,
+ metadata+: top.metadata("oauth2-cas-proxy"),
+ target:: top.casDeployment,
},
synapseDeployment: kube.Deployment("synapse") {
- metadata+: app.metadata("synapse"),
+ metadata+: top.metadata("synapse"),
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
- data: kube.PersistentVolumeClaimVolume(app.dataVolume),
- config: kube.ConfigMapVolume(app.synapseConfigMap),
+ data: kube.PersistentVolumeClaimVolume(top.dataVolume),
+ config: kube.ConfigMapVolume(top.synapseConfigMap),
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
- for k in std.objectFields(app.appservices)
+ for k in std.objectFields(top.appservices)
},
containers_: {
web: kube.Container("synapse") {
@@ -203,7 +203,7 @@
config: { mountPath: "/conf", },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
- for k in std.objectFields(app.appservices)
+ for k in std.objectFields(top.appservices)
},
},
},
@@ -218,8 +218,8 @@
},
synapseSvc: kube.Service("synapse") {
- metadata+: app.metadata("synapse"),
- target:: app.synapseDeployment,
+ metadata+: top.metadata("synapse"),
+ target:: top.synapseDeployment,
},
riotConfig:: {
@@ -253,9 +253,9 @@
},
riotConfigMap: kube.ConfigMap("riot-web-config") {
- metadata+: app.metadata("riot-web-config"),
+ metadata+: top.metadata("riot-web-config"),
data: {
- "config.json": std.manifestJsonEx(app.riotConfig, ""),
+ "config.json": std.manifestJsonEx(top.riotConfig, ""),
// Standard nginx.conf, made to work when running as unprivileged user.
"nginx.conf": |||
worker_processes auto;
@@ -304,13 +304,13 @@
},
riotDeployment: kube.Deployment("riot-web") {
- metadata+: app.metadata("riot-web"),
+ metadata+: top.metadata("riot-web"),
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
- config: kube.ConfigMapVolume(app.riotConfigMap),
+ config: kube.ConfigMapVolume(top.riotConfigMap),
},
containers_: {
web: kube.Container("riot-web") {
@@ -343,13 +343,13 @@
},
riotSvc: kube.Service("riot-web") {
- metadata+: app.metadata("riot-web"),
- target:: app.riotDeployment,
+ metadata+: top.metadata("riot-web"),
+ target:: top.riotDeployment,
},
wellKnown: if cfg.wellKnown then {
deployment: kube.Deployment("wellknown") {
- metadata+: app.metadata("wellknown"),
+ metadata+: top.metadata("wellknown"),
spec+: {
replicas: 1,
template+: {
@@ -373,8 +373,8 @@
},
},
svc: kube.Service("wellknown") {
- metadata+: app.metadata("wellknown"),
- target:: app.wellKnown.deployment,
+ metadata+: top.metadata("wellknown"),
+ target:: top.wellKnown.deployment,
},
} else {},
@@ -388,14 +388,14 @@
ingress: kube.SimpleIngress("matrix") {
hosts:: [cfg.webDomain],
- target_service:: app.riotSvc,
- metadata+: app.metadata("matrix"),
+ target_service:: top.riotSvc,
+ metadata+: top.metadata("matrix"),
extra_paths:: [
- { path: "/_matrix", backend: app.synapseSvc.name_port },
+ { path: "/_matrix", backend: top.synapseSvc.name_port },
] + (if cfg.cas.enable then [
- { path: "/_cas", backend: app.casSvc.name_port },
+ { path: "/_cas", backend: top.casSvc.name_port },
] else []) + (if cfg.wellKnown then [
- { path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
+ { path: "/.well-known/matrix", backend: top.wellKnown.svc.name_port },
] else [])
},
}
diff --git a/app/matrix/lib/media-repo.libsonnet b/app/matrix/lib/media-repo.libsonnet
index 275be24..0c61094 100644
--- a/app/matrix/lib/media-repo.libsonnet
+++ b/app/matrix/lib/media-repo.libsonnet
@@ -1,8 +1,8 @@
local kube = import "../../../kube/kube.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
image: error "cfg.image needs to be set",
@@ -27,7 +27,7 @@
},
ns:: error "ns needs to be a kube.Namespace object",
- local ns = app.ns,
+ local ns = top.ns,
config:: {
repo: {
@@ -63,7 +63,7 @@
configSecret: ns.Contain(kube.Secret("media-repo-config")) {
data_: {
- "config.yaml": std.manifestJsonEx(app.config, ""),
+ "config.yaml": std.manifestJsonEx(top.config, ""),
},
},
@@ -73,7 +73,7 @@
template+: {
spec+: {
volumes_: {
- config: kube.SecretVolume(app.configSecret),
+ config: kube.SecretVolume(top.configSecret),
tempdir: kube.EmptyDirVolume(),
},
containers_: {
@@ -144,7 +144,7 @@
"/app/matrix/media-repo-proxy",
"-downstream_host", downstreamHost,
"-upstream_host", upstreamHost,
- "-upstream", app.internalSvc.host_colon_port,
+ "-upstream", top.internalSvc.host_colon_port,
"-listen", ":8080",
],
ports_: {
@@ -159,10 +159,10 @@
} else {},
internalSvc: ns.Contain(kube.Service("media-repo-internal")) {
- target:: app.deployment,
+ target:: top.deployment,
},
svc: if std.length(needProxying) > 0 then ns.Contain(kube.Service("media-repo")) {
- target:: app.proxies.deployment,
- } else app.internalSvc,
+ target:: top.proxies.deployment,
+ } else top.internalSvc,
}
diff --git a/app/matrix/lib/riot.libsonnet b/app/matrix/lib/riot.libsonnet
index 3d0dbdf..0aa9574 100644
--- a/app/matrix/lib/riot.libsonnet
+++ b/app/matrix/lib/riot.libsonnet
@@ -1,8 +1,8 @@
local kube = import "../../../kube/kube.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
# webDomain is the domain name at which element will run
webDomain: error "cfg.webDomain must be set",
@@ -12,7 +12,7 @@
},
ns:: error "ns needs to be a kube.Namespace object",
- local ns = app.ns,
+ local ns = top.ns,
config:: {
"default_hs_url": "https://%s" % [cfg.webDomain],
@@ -46,7 +46,7 @@
configMap: ns.Contain(kube.ConfigMap("riot-web-config")) {
data: {
- "config.json": std.manifestJsonEx(app.config, ""),
+ "config.json": std.manifestJsonEx(top.config, ""),
// Standard nginx.conf, made to work when running as unprivileged user.
"nginx.conf": importstr "riot/nginx.conf",
},
@@ -58,7 +58,7 @@
template+: {
spec+: {
volumes_: {
- config: kube.ConfigMapVolume(app.configMap),
+ config: kube.ConfigMapVolume(top.configMap),
},
containers_: {
web: kube.Container("riot-web") {
@@ -91,6 +91,6 @@
},
svc: ns.Contain(kube.Service("riot-web")) {
- target:: app.deployment,
+ target:: top.deployment,
},
}
diff --git a/app/matrix/lib/synapse.libsonnet b/app/matrix/lib/synapse.libsonnet
index c9a03e7..a6cf9dd 100644
--- a/app/matrix/lib/synapse.libsonnet
+++ b/app/matrix/lib/synapse.libsonnet
@@ -1,8 +1,8 @@
local kube = import "../../../kube/kube.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
image: error "cfg.image needs to be set",
storageClassName: error "cfg.storrageClassName needs to be set",
@@ -24,7 +24,7 @@
},
ns:: error "ns needs to be provided",
- local ns = app.ns,
+ local ns = top.ns,
postgres:: error "postgres needs to be provided",
redis:: error "redis needs to be provided",
@@ -45,7 +45,7 @@
signing_key_path: "/secrets/homeserver_signing_key",
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
- for k in std.objectFields(app.appservices)
+ for k in std.objectFields(top.appservices)
],
notify_appservices: cfg.appserviceWorker == false,
@@ -54,8 +54,8 @@
# *some* federation, needs investigation...
#send_federation: cfg.federationWorker == false,
#federation_sender_instances: if cfg.federationWorker then [
- # "%s-%s" % [app.federationSenderWorker.deployment.metadata.name, idx]
- # for idx in std.range(0, app.federationSenderWorker.deployment.spec.replicas)
+ # "%s-%s" % [top.federationSenderWorker.deployment.metadata.name, idx]
+ # for idx in std.range(0, top.federationSenderWorker.deployment.spec.replicas)
#] else [],
} + (if cfg.cas.enable then {
cas_config: {
@@ -74,7 +74,7 @@
configMap: ns.Contain(kube.ConfigMap("synapse")) {
data: {
- "homeserver.yaml": std.manifestYamlDoc(app.config),
+ "homeserver.yaml": std.manifestYamlDoc(top.config),
"log.config": importstr "synapse/log.config",
},
},
@@ -121,13 +121,13 @@
template+: {
spec+: {
volumes_: {
- config: kube.ConfigMapVolume(app.configMap),
+ config: kube.ConfigMapVolume(top.configMap),
secrets: { secret: { secretName: "synapse" } },
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
- for k in std.objectFields(app.appservices)
+ for k in std.objectFields(top.appservices)
} + if worker.cfg.mountData then {
- data: kube.PersistentVolumeClaimVolume(app.dataVolume),
+ data: kube.PersistentVolumeClaimVolume(top.dataVolume),
} else {},
containers_: {
web: kube.Container("synapse") {
@@ -153,18 +153,18 @@
SYNAPSE_REGISTRATION_SHARED_SECRET: cfg.registrationSharedSecret,
WORKER_REPLICATION_SECRET: cfg.workerReplicationSecret,
- POSTGRES_PASSWORD: app.postgres.cfg.password,
- POSTGRES_USER: app.postgres.cfg.username,
- POSTGRES_DB: app.postgres.cfg.database,
- POSTGRES_HOST: app.postgres.cfg.host,
- POSTGRES_PORT: app.postgres.cfg.port,
+ POSTGRES_PASSWORD: top.postgres.cfg.password,
+ POSTGRES_USER: top.postgres.cfg.username,
+ POSTGRES_DB: top.postgres.cfg.database,
+ POSTGRES_HOST: top.postgres.cfg.host,
+ POSTGRES_PORT: top.postgres.cfg.port,
- REDIS_PASSWORD: app.redis.cfg.password,
+ REDIS_PASSWORD: top.redis.cfg.password,
POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
TURN_SHARED_SECRET: if cfg.coturn.enable then cfg.coturn.config.authSecret else "",
- X_SECRETS_CONFIG: std.manifestYamlDoc(app.secretsConfig),
+ X_SECRETS_CONFIG: std.manifestYamlDoc(top.secretsConfig),
X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
},
volumeMounts_: {
@@ -172,7 +172,7 @@
secrets: { mountPath: "/secrets" },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
- for k in std.objectFields(app.appservices)
+ for k in std.objectFields(top.appservices)
} + if worker.cfg.mountData then {
data: { mountPath: "/data" },
} else {},
@@ -206,14 +206,14 @@
# Synapse main process
main: {
- deployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
+ deployment: top.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
cfg+: {
localConfig: {
# Following configuration values need to cause master
# process restart.
- notify_appservices: app.config.notify_appservices,
- # send_federation: app.config.send_federation,
- # federation_sender_instances: app.config.federation_sender_instances,
+ notify_appservices: top.config.notify_appservices,
+ # send_federation: top.config.send_federation,
+ # federation_sender_instances: top.config.federation_sender_instances,
},
resources+: {
@@ -231,10 +231,10 @@
},
},
svc: ns.Contain(kube.Service("synapse")) {
- target:: app.main.deployment,
+ target:: top.main.deployment,
},
replicationSvc: ns.Contain(kube.Service("synapse-replication-master")) {
- target:: app.main.deployment,
+ target:: top.main.deployment,
spec+: {
ports: [
{ port: 9093, name: 'replication', targetPort: 9093 },
@@ -245,7 +245,7 @@
genericWorker: {
# Synapse generic worker deployment
- deployment: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
+ deployment: top.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
@@ -263,7 +263,7 @@
},
},
svc: ns.Contain(kube.Service("synapse-generic")) {
- target:: app.genericWorker.deployment,
+ target:: top.genericWorker.deployment,
},
# Following paths can be handled by generic workers.
@@ -368,7 +368,7 @@
# Synapse media worker. This handles access to uploads and media stored in app.dataVolume
mediaWorker: {
- deployment: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
+ deployment: top.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
cfg+: {
mountData: true,
localConfig+: {
@@ -387,13 +387,13 @@
},
},
svc: ns.Contain(kube.Service("synapse-media")) {
- target:: app.mediaWorker.deployment,
+ target:: top.mediaWorker.deployment,
},
},
appserviceWorker: if cfg.appserviceWorker then {
# Worker responsible for sending traffic to registered appservices
- deployment: app.SynapseWorker("synapse-appservice", "synapse.app.appservice", kube.StatefulSet) {
+ deployment: top.SynapseWorker("synapse-appservice", "synapse.app.appservice", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
@@ -413,7 +413,7 @@
} else null,
federationSenderWorker: if cfg.federationWorker then {
- deployment: app.SynapseWorker("synapse-federation-sender", "synapse.app.federation_sender", kube.StatefulSet) {
+ deployment: top.SynapseWorker("synapse-federation-sender", "synapse.app.federation_sender", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
diff --git a/app/matrix/lib/wellknown.libsonnet b/app/matrix/lib/wellknown.libsonnet
index 1760e01..99c87c9 100644
--- a/app/matrix/lib/wellknown.libsonnet
+++ b/app/matrix/lib/wellknown.libsonnet
@@ -1,8 +1,8 @@
local kube = import "../../../kube/kube.libsonnet";
{
- local app = self,
- local cfg = app.cfg,
+ local top = self,
+ local cfg = top.cfg,
cfg:: {
image: error "cfg.image must be set",
@@ -37,6 +37,6 @@
},
},
svc: ns.Contain(kube.Service("wellknown")) {
- target:: app.deployment,
+ target:: top.deployment,
},
}