// Ideally, things should not just break in a way that requires a restart (or at least, a liveness probe should do the job),
// but in the real world, periodically restarting an app is sometimes the easiest way to improve reliability.
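//
// This library emits a CronJob (plus ServiceAccount/Role/RoleBinding) that runs
// `kubectl rollout restart` against a target workload on a schedule.
//
// Example usage (a minimal sketch; 'restarter.libsonnet' and 'app.deployment' are
// placeholders for this file's actual path and your target workload object):
//
//   local restarter = import 'restarter.libsonnet';
//
//   restarter {
//     cfg+:: {
//       target: app.deployment,  // any Deployment/StatefulSet/DaemonSet
//     },
//     metadata+:: {
//       namespace: 'prod',
//     },
//   }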
{
  kube:: import 'kube.libsonnet',
  local kube = self.kube,
  local top = self,
  local cfg = self.cfg,
  cfg:: {
    // default to daily at 3am
    schedule: '0 3 * * *',
    // point to a Deployment/StatefulSet/DaemonSet object
    target: error 'cfg.target is required',
    targetStr: '%s/%s' % [self.target.kind, self.target.metadata.name],
    name: '%s-restarter' % [self.target.metadata.name],
  },
  metadata:: {
    namespace: error 'metadata.namespace is required',
  },
  local ns = kube.Namespace(top.metadata.namespace),
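  // CronJob that performs the restart: it runs `kubectl rollout restart` against
  // cfg.target on cfg.schedule, using the ServiceAccount defined below.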
  cronjob: ns.Contain(kube.CronJob(cfg.name)) {
    spec+: {
      schedule: cfg.schedule,
      // if a previous restart run is somehow still going, replace it with the new one
      concurrencyPolicy: 'Replace',
      jobTemplate+: {
        spec+: {
          template+: {
            spec+: {
              serviceAccountName: top.sa.metadata.name,
              restartPolicy: 'Never',
              containers_: {
                default: kube.Container('default') {
                  image: 'bitnami/kubectl:1.31.3',
                  command: ['kubectl', 'rollout', 'restart', cfg.targetStr],
                },
              },
            },
          },
        },
      },
    },
  },
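  // ServiceAccount that the restart Job's pod runs as.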
  sa: ns.Contain(kube.ServiceAccount(cfg.name)),
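  // Least-privilege Role: `kubectl rollout restart` only needs to get and patch the
  // target workload (it updates the pod template's restartedAt annotation).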
  role: ns.Contain(kube.Role(cfg.name)) {
    rules: [
      {
        apiGroups: ['apps'],
        resources: [std.asciiLower(cfg.target.kind) + 's'],
        resourceNames: [cfg.target.metadata.name],
        verbs: ['get', 'patch'],
      },
    ],
  },
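  // Bind the Role to the ServiceAccount.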
  rb: ns.Contain(kube.RoleBinding(cfg.name)) {
    roleRef_: top.role,
    subjects_: [top.sa],
  },
}