// blob: 275be2483345f2421f193e341c97502538361ba1 [file] [log] [blame]
local kube = import "../../../kube/kube.libsonnet";
{
local app = self,
local cfg = app.cfg,
// User-facing configuration. Every field initialized with `error` must be
// overridden by the instantiating environment before manifestation.
cfg:: {
image: error "cfg.image needs to be set",
// Homeserver objects; each is expected to carry at least `name` (the MXID
// hostname) and `csApi` (client-server API base URL) — see needProxying
// below. TODO(review): confirm full expected schema against callers.
homeservers: [],
// Matrix IDs granted media-repo admin privileges.
admins: [],
// S3-compatible object store used as the media datastore.
s3: {
endpoint: error "cfg.s3.endpoint needs to be set",
accessKey: error "cfg.s3.accessKey needs to be set",
secretKey: error "cfg.s3.secretKey needs to be set",
bucketName: error "cfg.s3.bucketName needs to be set",
region: error "cfg.s3.region needs to be set",
},
// PostgreSQL connection parameters, assembled into a DSN in config below.
db: {
username: error "cfg.db.username needs to be set",
password: error "cfg.db.password needs to be set",
database: error "cfg.db.database needs to be set",
host: error "cfg.db.host needs to be set",
port: error "cfg.db.port needs to be set",
},
},
// Namespace that will contain all resources below; must be set by the user
// to a kube.Namespace object.
ns:: error "ns needs to be a kube.Namespace object",
local ns = app.ns,
// matrix-media-repo configuration tree, serialized into the Secret below.
config:: {
repo: {
bindAddress: "0.0.0.0",
port: 8000,
useForwardedHost: false,
},
database: {
// NOTE(review): cfg.db.password is interpolated verbatim into the DSN;
// characters special in URLs (@, /, :, ?) would corrupt it — confirm
// passwords are URL-safe or percent-encoded by the caller.
postgres: "postgres://%s:%s@%s:%d/%s?sslmode=disable" % [cfg.db.username, cfg.db.password, cfg.db.host, cfg.db.port, cfg.db.database],
},
homeservers: cfg.homeservers,
admins: cfg.admins,
thumbnails: {
// 3 * 10 MiB = 30 MiB maximum source size eligible for thumbnailing.
maxSourceBytes: 10485760 * 3,
},
datastores: [
{
type: "s3",
enabled: true,
forKinds: ["all"],
opts: {
// Staging area for uploads; must match the tempdir volumeMount in the
// deployment below.
tempPath: "/tmp/mediarepo_s3_upload",
endpoint: cfg.s3.endpoint,
accessKeyId: cfg.s3.accessKey,
accessSecret: cfg.s3.secretKey,
// NOTE(review): TLS to the S3 endpoint is disabled — presumably the
// endpoint is in-cluster; confirm this is intentional.
ssl: false,
bucketName: cfg.s3.bucketName,
region: cfg.s3.region,
},
}
],
},
// Secret carrying config.yaml. The config is manifested as JSON (with no
// indentation), which is itself valid YAML, so media-repo can parse it.
configSecret: ns.Contain(kube.Secret("media-repo-config")) {
data_: {
"config.yaml": std.manifestJsonEx(app.config, ""),
},
},
// Main matrix-media-repo deployment: single replica serving HTTP on :8000,
// with the rendered config mounted from configSecret.
deployment: ns.Contain(kube.Deployment("media-repo")) {
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.SecretVolume(app.configSecret),
// Scratch space for S3 upload staging (see datastores.opts.tempPath
// in config above).
tempdir: kube.EmptyDirVolume(),
},
containers_: {
repo: kube.Container("media-repo") {
image: cfg.image,
command: ["/usr/local/bin/media_repo"],
ports_: {
http: { containerPort: 8000 },
},
env_: {
// NOTE(review): presumably the directory media_repo scans for
// config.yaml — matches the config volumeMount below; confirm
// against the media-repo docs.
REPO_CONFIG: "/config",
},
volumeMounts_: {
config: { mountPath: "/config" },
tempdir: { mountPath: "/tmp/mediarepo_s3_upload" },
},
// Quick readiness checks to take the pod out of rotation promptly.
readinessProbe: {
httpGet: {
path: "/healthz",
port: "http",
},
initialDelaySeconds: 5,
periodSeconds: 10,
},
// Liveness is deliberately laxer (60s initial delay, 30s period)
// so slow startups don't trigger restart loops.
livenessProbe: {
httpGet: {
path: "/healthz",
port: "http",
},
initialDelaySeconds: 60,
periodSeconds: 30,
},
},
},
},
},
},
},
// Run //app/matrix/media-repo-proxy, if needed. This rewrites Host headers
// from the homeserver's serving Host to the MXID hostname (which
// matrix-media-repo expects).
//
// Currently we are only able to run one proxy for one homeserver config -
// but we don't expect to have multiple homeservers per matrix-media-repo
// any time soon.
// Homeservers whose client-server API is not served directly at
// https://<name> require the Host-rewriting proxy in front of media-repo.
local needProxying = std.filter(
  function(h) "https://%s" % [h.name] != h.csApi,
  cfg.homeservers
),
// Optional reverse proxy (built from //app/matrix/media-repo-proxy) that
// rewrites the Host header from the homeserver's serving host (derived from
// csApi) to the MXID hostname that matrix-media-repo expects. Evaluates to
// an empty object when no homeserver needs proxying, and errors out when
// more than one does (only a single proxy is supported).
proxies: if std.length(needProxying) > 1 then error "can only proxy one homeserver" else
  if std.length(needProxying) == 1 then {
    local homeserver = needProxying[0],
    local upstreamHost = homeserver.name,
    local prefix = "https://",
    // Guard: downstreamHost is derived by stripping the https:// prefix off
    // csApi. The needProxying filter only guarantees csApi differs from
    // https://<name>, not that it is https at all — fail loudly instead of
    // silently producing a garbage downstream host for e.g. http:// URLs.
    assert std.substr(homeserver.csApi, 0, std.length(prefix)) == prefix : "proxied homeserver csApi must start with https://, got %s" % [homeserver.csApi],
    local downstreamHost = std.substr(homeserver.csApi, std.length(prefix), std.length(homeserver.csApi) - std.length(prefix)),
    deployment: ns.Contain(kube.Deployment("media-repo-proxy")) {
      spec+: {
        template+: {
          spec+: {
            containers_: {
              default: kube.Container("default") {
                image: "registry.k0.hswaw.net/q3k/media-repo-proxy:1631791816-18609443fffde38a055f504e80f95e44f49d2481",
                command: [
                  "/app/matrix/media-repo-proxy",
                  "-downstream_host", downstreamHost,
                  "-upstream_host", upstreamHost,
                  // Proxy forwards to the internal (unproxied) service.
                  "-upstream", app.internalSvc.host_colon_port,
                  "-listen", ":8080",
                ],
                ports_: {
                  http: { containerPort: 8080 },
                },
              },
            },
          },
        },
      },
    },
  } else {},
// Service pointing directly at the media-repo deployment, bypassing any
// proxy; used as the proxy's upstream and as the public service when no
// proxying is needed (see svc below).
internalSvc: ns.Contain(kube.Service("media-repo-internal")) {
target:: app.deployment,
},
// Public-facing service: aliases the internal service directly when no
// homeserver needs proxying, otherwise fronts the proxy deployment.
svc: if std.length(needProxying) == 0 then app.internalSvc else ns.Contain(kube.Service("media-repo")) {
  target:: app.proxies.deployment,
},
}