# Factorio on Kubernetes.
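//
// This is a library; import it and override cfg to instantiate a server.
// A minimal usage sketch (the import path and all values below are
// illustrative, not taken from an existing deployment):
//
//   local factorio = import "factorio.libsonnet";
//   factorio {
//     cfg+: {
//       namespace: "factorio",
//       prefix: "myserver-",
//       rconPassword: "a-better-secret",
//       mods: [
//         { name: "example-mod", version: "1.0.0" },
//       ],
//     },
//   }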
local kube = import "../../../kube/kube.libsonnet";
local proxy = import "proxy.libsonnet";
{
local factorio = self,
local cfg = factorio.cfg,
cfg:: {
namespace: error "namespace must be set",
appName: "factorio",
storageClassName: "waw-hdd-redundant-3",
prefix: "", # if set, should be 'foo-'
proxyImage: proxy.cfg.image,
rconPort: 2137,
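// Default RCON password; deployments will likely want to override this.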
rconPassword: "farts",
// Configuration options that will get serialized into
// --config config.ini
factorioConfig: {
// There is no documentation for this file, but you can check
// config.ini in any installed Factorio data directory for a
// sample.
//
// This uses std.manifestIni, so to create a file containing:
// version=9
// [other]
// verbose-logging=true
// you would set the following:
// main: {
// version: "9",
// },
// other: {
// "verbose-logging": "true",
// },
},
// Configuration options that will get serialized into
// --server-settings server-settings.json.
serverSettings: (import "config/server-settings.libsonnet") {
visibility+: {
public: false,
lan: false,
},
},
// Configuration options that will get serialized into
// --map-settings map-settings.json.
mapSettings: (import "config/map-settings.libsonnet") {
},
// Configuration options that will get serialized into
// --map-gen-settings map-gen-settings.json.
mapGenSettings: (import "config/map-gen-settings.libsonnet") {
},
tag: "latest",
image: "registry.k0.hswaw.net/q3k/factorio:" + cfg.tag,
resources: {
requests: {
cpu: "500m",
memory: "500Mi",
},
limits: {
cpu: "2",
memory: "2Gi",
},
},
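// Mods to install: a list of { name, version } objects. These are
// serialized into mods.pb.text (see configMap below) and consumed by the
// modproxy init container.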
mods: [],
},
makeName(suffix):: cfg.prefix + suffix,
metadata:: {
namespace: cfg.namespace,
labels: {
"app.kubernetes.io/name": factorio.makeName("factorio"),
"app.kubernetes.io/managed-by": "kubecfg",
"app.kubernetes.io/component": "factorio",
},
},
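// Two PersistentVolumeClaims: one for downloaded mods (mounted at
// /factorio/mods) and one for the server's persistent data (mounted at /data).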
volumeClaimMods: kube.PersistentVolumeClaim(factorio.makeName("factorio-mods")) {
metadata+: factorio.metadata,
storage: "1Gi",
storageClass: cfg.storageClassName,
},
volumeClaimData: kube.PersistentVolumeClaim(factorio.makeName("factorio")) {
metadata+: factorio.metadata,
storage: "5Gi",
storageClass: cfg.storageClassName,
},
configMap: kube.ConfigMap(factorio.makeName("config")) {
metadata+: factorio.metadata,
data: {
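// Mod list in a textproto-style format, one
//   mod { name: "..." version: "..." }
// entry per line; read by the modproxy init container via -config_path.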
"mods.pb.text": std.join("\n", [
"mod { name: \"%s\" version: \"%s\" }" % [m.name, m.version],
for m in cfg.mods
]),
"server-settings.json": std.manifestJson(cfg.serverSettings),
"map-settings.json": std.manifestJson(cfg.mapSettings),
"map-gen-settings.json": std.manifestJson(cfg.mapGenSettings),
},
},
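// Single-replica Deployment running the dedicated game server.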
deployment: kube.Deployment(factorio.makeName("factorio")) {
metadata+: factorio.metadata,
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
data: kube.PersistentVolumeClaimVolume(factorio.volumeClaimData),
mods: kube.PersistentVolumeClaimVolume(factorio.volumeClaimMods),
config: kube.ConfigMapVolume(factorio.configMap),
},
initContainers_: {
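// modproxy prepares the mods volume: it talks to the in-cluster mod
// proxy (proxy.factorio.svc) and fetches the mods listed in
// mods.pb.text before the game server starts.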
modproxy: kube.Container("modproxy") {
image: cfg.proxyImage,
command: [
"/games/factorio/modproxy/client",
"-hspki_disable",
"-factorio_path", "/factorio",
"-proxy", "proxy.factorio.svc.cluster.local:4200",
"-config_path", "/factorio/mods.pb.text",
],
volumeMounts_: {
mods: { mountPath: "/factorio/mods" },
config: { mountPath: "/factorio/mods.pb.text", subPath: "mods.pb.text" },
},
},
},
containers_: {
factorio: kube.Container(factorio.makeName("factorio")) {
image: cfg.image,
args: [
"/entrypoint.sh",
"--rcon-port", std.toString(cfg.rconPort),
"--rcon-password", cfg.rconPassword,
"--server-settings", "/factorio/config/server-settings.json",
"--map-settings", "/factorio/config/map-settings.json",
"--map-gen-settings", "/factorio/config/map-gen-settings.json",
],
ports_: {
client: { containerPort: 34197 },
rcon: { containerPort: cfg.rconPort },
},
volumeMounts_: {
data: { mountPath: "/data" },
mods: { mountPath: "/factorio/mods" },
config: { mountPath: "/factorio/config" },
},
resources: cfg.resources,
},
},
},
},
},
},
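// LoadBalancer Service exposing only the UDP game port; the RCON port is
// not exposed by this Service.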
svc: kube.Service(factorio.makeName("factorio")) {
metadata+: factorio.metadata {
// hack - have to keep the existing naming scheme, otherwise we'd lose the
// addresses already assigned to this Service
labels: {
"app.kubernetes.io/name": cfg.appName,
},
},
target:: factorio.deployment,
spec+: {
ports: [
{ name: "client", port: 34197, targetPort: 34197, protocol: "UDP" },
],
type: "LoadBalancer",
},
},
}