# Factorio on Kubernetes.
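#
# This is a jsonnet/kubecfg library: import it and override cfg to deploy an
# instance. A minimal usage sketch (the import path and the "games" namespace
# are illustrative assumptions, not part of this file):
#
#   local factorio = import "factorio.libsonnet";
#   factorio {
#       cfg+:: {
#           namespace: "games",
#           prefix: "blue-",
#       },
#   }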
local kube = import "../../../kube/kube.libsonnet";
local proxy = import "proxy.libsonnet";
{
    local top = self,
    local cfg = top.cfg,

    cfg:: {
        namespace: error "namespace must be set",
        appName: "factorio",
        storageClassName: "waw-hdd-redundant-3",
        prefix: "", # if set, should be 'foo-'

        proxyImage: proxy.cfg.image,

        rconPort: 2137,
        rconPassword: "farts",

        // Configuration options that will get serialized into
        // --config config.ini
        factorioConfig: {
            // There is no documentation for this file, but you can check
            // config.ini in any installed Factorio data directory for a
            // sample.
            //
            // This uses std.manifestIni, so to create a file containing:
            //   version=9
            //   [other]
            //   verbose-logging=true
            // You would set the following:
            //   main: {
            //       version: "9",
            //   },
            //   other: {
            //       "verbose-logging": "true",
            //   },
        },

        // Configuration options that will get serialized into
        // --server-settings server-settings.json.
        serverSettings: (import "config/server-settings.libsonnet") {
            visibility+: {
                public: false,
                lan: false,
            },
        },
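        // Settings from config/server-settings.libsonnet can be overridden the
        // same way at instantiation time. A sketch of making the server
        // publicly listed:
        //   serverSettings+: {
        //       visibility+: { public: true },
        //   },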
        // Configuration options that will get serialized into
        // --map-settings map-settings.json.
        mapSettings: (import "config/map-settings.libsonnet") {
        },

        // Configuration options that will get serialized into
        // --map-gen-settings map-gen-settings.json.
        mapGenSettings: (import "config/map-gen-settings.libsonnet") {
        },

        tag: "latest",
        image: "registry.k0.hswaw.net/q3k/factorio:" + cfg.tag,
        resources: {
            requests: {
                cpu: "500m",
                memory: "500Mi",
            },
            limits: {
                cpu: "2",
                memory: "2Gi",
            },
        },

        mods: [],
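        // Each entry here is consumed by the mods.pb.text template in
        // configMap below, which reads only the name and version fields.
        // A sketch of an override (mod name and version are illustrative):
        //   mods: [
        //       { name: "example-mod", version: "1.0.0" },
        //   ],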
    },

    makeName(suffix):: cfg.prefix + suffix,

    metadata:: {
        namespace: cfg.namespace,
        labels: {
            "app.kubernetes.io/name": top.makeName("factorio"),
            "app.kubernetes.io/managed-by": "kubecfg",
            "app.kubernetes.io/component": "factorio",
        },
    },

    volumeClaimMods: kube.PersistentVolumeClaim(top.makeName("factorio-mods")) {
        metadata+: top.metadata,
        storage:: "1Gi",
        storageClass:: cfg.storageClassName,
    },

    volumeClaimData: kube.PersistentVolumeClaim(top.makeName("factorio")) {
        metadata+: top.metadata,
        storage:: "5Gi",
        storageClass:: cfg.storageClassName,
    },

    configMap: kube.ConfigMap(top.makeName("config")) {
        metadata+: top.metadata,
        data: {
            "mods.pb.text": std.join("\n", [
                "mod { name: \"%s\" version: \"%s\" }" % [m.name, m.version],
                for m in cfg.mods
            ]),
            "server-settings.json": std.manifestJson(cfg.serverSettings),
            "map-settings.json": std.manifestJson(cfg.mapSettings),
            "map-gen-settings.json": std.manifestJson(cfg.mapGenSettings),
        },
    },
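    // For a mod entry like { name: "example-mod", version: "1.0.0" } (an
    // illustrative example), the mods.pb.text template above renders one
    // text-proto line per mod:
    //   mod { name: "example-mod" version: "1.0.0" }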
    deployment: kube.Deployment(top.makeName("factorio")) {
        metadata+: top.metadata,
        spec+: {
            replicas: 1,
            template+: {
                spec+: {
                    volumes_: {
                        data: top.volumeClaimData.volume,
                        mods: top.volumeClaimMods.volume,
                        config: top.configMap.volume,
                    },
                    initContainers_: {
                        modproxy: kube.Container("modproxy") {
                            image: cfg.proxyImage,
                            command: [
                                "/games/factorio/modproxy/client",
                                "-hspki_disable",
                                "-factorio_path", "/factorio",
                                "-proxy", "proxy.factorio.svc.cluster.local:4200",
                                "-config_path", "/factorio/mods.pb.text",
                            ],
                            volumeMounts_: {
                                mods: { mountPath: "/factorio/mods" },
                                config: { mountPath: "/factorio/mods.pb.text", subPath: "mods.pb.text" },
                            },
                        },
                    },
                    containers_: {
                        factorio: kube.Container(top.makeName("factorio")) {
                            image: cfg.image,
                            args: [
                                "/entrypoint.sh",
                                "--rcon-port", std.toString(cfg.rconPort),
                                "--rcon-password", cfg.rconPassword,
                                "--server-settings", "/factorio/config/server-settings.json",
                                "--map-settings", "/factorio/config/map-settings.json",
                                "--map-gen-settings", "/factorio/config/map-gen-settings.json",
                            ],
                            ports_: {
                                client: { containerPort: 34197 },
                                rcon: { containerPort: cfg.rconPort },
                            },
                            volumeMounts_: {
                                data: { mountPath: "/data" },
                                mods: { mountPath: "/factorio/mods" },
                                config: { mountPath: "/factorio/config" },
                            },
                            resources: cfg.resources,
                        },
                    },
                },
            },
        },
    },

    svc: kube.Service(top.makeName("factorio")) {
        metadata+: top.metadata {
            // hack - have to keep existing naming scheme otherwise we'd lose addresses
            labels: {
                "app.kubernetes.io/name": cfg.appName,
            },
        },
        target:: top.deployment,
        spec+: {
            ports: [
                { name: "client", port: 34197, targetPort: 34197, protocol: "UDP" },
            ],
            type: "LoadBalancer",
        },
    },
}