# Deploy a per-cluster Nginx Ingress Controller
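#
# Usage sketch (the import path and field name are illustrative and
# depend on the cluster's jsonnet tree):
#
#   local nginx = import "lib/nginx.libsonnet";
#   {
#     nginx: nginx.Environment {},
#   }
#
# The target namespace can be overridden via:
#   nginx.Environment { cfg+:: { namespace: "some-other-namespace" } }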
local kube = import "../../../kube/kube.libsonnet";
local policies = import "../../../kube/policies.libsonnet";
{
Environment: {
local env = self,
local cfg = env.cfg,
cfg:: {
# Built from nginx-ingress-controller/Dockerfile:
#
# $ cd cluster/kube/lib/nginx-ingress-controller
# $ docker build -t eu.gcr.io/bgpwtf/nginx-ingress-controller:v0.44.0-r1 .
# [..]
# (2/8) Upgrading libcrypto1.1 (1.1.1i-r0 -> 1.1.1k-r0)
# (3/8) Upgrading libssl1.1 (1.1.1i-r0 -> 1.1.1k-r0)
# [...]
# (8/8) Upgrading openssl (1.1.1i-r0 -> 1.1.1k-r0)
# $ docker push eu.gcr.io/bgpwtf/nginx-ingress-controller:v0.44.0-r1
#
# TODO(q3k): unfork this once openssl 1.1.1k lands in upstream
# nginx-ingress-controller.
image: "eu.gcr.io/bgpwtf/nginx-ingress-controller:v0.44.0-r1",
namespace: "nginx-system",
},
metadata:: {
namespace: cfg.namespace,
labels: {
"app.kubernetes.io/name": "ingress-nginx",
"app.kubernetes.io/part-of": "ingress-nginx",
},
},
namespace: kube.Namespace(cfg.namespace),
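# Allow pods in this namespace to run under the relaxed ("insecure")
# policy, which the controller's securityContext below relies on.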
allowInsecure: policies.AllowNamespaceInsecure(cfg.namespace),
maps: {
make(name):: kube.ConfigMap(name) {
metadata+: env.metadata,
},
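# Main controller configuration ConfigMap. proxy-set-headers points
# (by namespace/name) at another ConfigMap whose entries are added as
# headers on every proxied request - here, nginx-custom-headers below.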
configuration: env.maps.make("nginx-configuration") {
data: {
"proxy-set-headers": "%s/nginx-custom-headers" % [cfg.namespace],
},
},
customHeaders: env.maps.make("nginx-custom-headers") {
data: {
# RFC6648 deprecates X-prefixed headers as a convention in
# multiple application protocols, including HTTP. It
# recommends that new headers simply start out under their
# final, standardized name, i.e. Toaster-ID instead of
# X-Toaster-ID.
#
# However, it also acknowledges that headers likely to
# never be standardized can still be prefixed with OrgName-
# or other constructs. And since we're not even attempting
# to standardize anything here, this is what we use to
# prefix hscloud-specific headers.
#
# Hscloud == hscloud, this repository.
# Nic == nginx-ingress-controller, this ingress controller.
#
# These headers carry the original source address and port.
# Source-IP duplicates X-Forwarded-For, but is added for
# consistency with Source-Port.
#
# Source-IP is an IP address in two possible formats:
# IPv4: "1.2.3.4"
# IPv6: "2a0d:1234::42"
# Any other format received by services should be
# considered invalid, and the service should assume a
# misconfiguration of the N-I-C.
"Hscloud-Nic-Source-IP": "${remote_addr}",
# Source-Port is a stringified TCP port, encoding a port
# number from 1 to 65535. Any other value received by
# services should be considered invalid, and the service
# should assume a misconfiguration of the N-I-C.
"Hscloud-Nic-Source-Port": "${remote_port}",
},
},
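# TCP services exposed through the controller: keys are listening
# ports, values are namespace/service:port backends. Port 22 is
# forwarded to the gerrit service (SSH).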
tcp: env.maps.make("tcp-services") {
data: {
"22": "gerrit/gerrit:22",
},
},
udp: env.maps.make("udp-services"),
},
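# RBAC below follows the upstream ingress-nginx manifests: a
# ClusterRole for cluster-wide reads (ingresses, services,
# endpoints, ...) and a namespaced Role for leader election and
# event reporting.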
sa: kube.ServiceAccount("nginx-ingress-serviceaccount") {
metadata+: env.metadata,
},
cr: kube.ClusterRole("nginx-ingress-clusterrole") {
metadata+: env.metadata {
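# ClusterRoles are cluster-scoped: hide the namespace inherited
# from env.metadata (same below for the ClusterRoleBinding).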
namespace:: null,
},
rules: [
{
apiGroups: [""],
resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"],
verbs: ["list", "watch"],
},
{
apiGroups: [""],
resources: ["nodes"],
verbs: ["get"],
},
{
apiGroups: [""],
resources: ["services"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["extensions", "networking.k8s.io"],
resources: ["ingresses"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: [""],
resources: ["events"],
verbs: ["create", "patch"],
},
{
apiGroups: ["extensions", "networking.k8s.io"],
resources: ["ingresses/status"],
verbs: ["update"],
},
{
apiGroups: ["extensions", "networking.k8s.io"],
resources: ["ingressclasses"],
verbs: ["get", "list", "watch"],
},
],
},
crb: kube.ClusterRoleBinding("nginx-ingress-clusterrole-nisa-binding") {
metadata+: env.metadata {
namespace:: null,
},
roleRef: {
apiGroup: "rbac.authorization.k8s.io",
kind: "ClusterRole",
name: env.cr.metadata.name,
},
subjects: [
{
kind: "ServiceAccount",
name: env.sa.metadata.name,
namespace: env.sa.metadata.namespace,
},
],
},
role: kube.Role("nginx-ingress-role") {
metadata+: env.metadata,
rules: [
{
apiGroups: [""],
resources: ["namespaces"],
verbs: ["get"],
},
{
apiGroups: [""],
resources: ["configmaps", "pods", "secrets", "endpoints"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: [""],
resources: ["services"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["extensions", "networking.k8s.io"],
resources: ["ingresses"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["extensions", "networking.k8s.io"],
resources: ["ingresses/status"],
verbs: ["update"],
},
{
apiGroups: ["extensions", "networking.k8s.io"],
resources: ["ingressclasses"],
verbs: ["get", "list", "watch"],
},
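# The leader election lock is a ConfigMap named
# <election-id>-<ingress-class>, i.e. ingress-controller-leader-nginx.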
{
apiGroups: [""],
resources: ["configmaps"],
resourceNames: ["ingress-controller-leader-nginx"],
verbs: ["get", "update"],
},
{
apiGroups: [""],
resources: ["configmaps"],
verbs: ["create"],
},
{
apiGroups: [""],
resources: ["events"],
verbs: ["create", "patch"],
},
],
},
roleb: kube.RoleBinding("nginx-ingress-role-nisa-binding") {
metadata+: env.metadata,
roleRef: {
apiGroup: "rbac.authorization.k8s.io",
kind: "Role",
name: env.role.metadata.name,
},
subjects: [
{
kind: "ServiceAccount",
name: env.sa.metadata.name,
namespace: env.sa.metadata.namespace,
},
],
},
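# Main LoadBalancer Service for HTTP(S) and Gerrit SSH traffic. Its
# external address is what --publish-service (below) reports back
# into the status of managed Ingress objects.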
service: kube.Service("ingress-nginx") {
metadata+: env.metadata,
target_pod:: env.deployment.spec.template,
spec+: {
type: "LoadBalancer",
ports: [
{ name: "ssh", port: 22, targetPort: 22, protocol: "TCP" },
{ name: "http", port: 80, targetPort: 80, protocol: "TCP" },
{ name: "https", port: 443, targetPort: 443, protocol: "TCP" },
],
},
},
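# Second LoadBalancer with a pinned address, exposing SSH on port 22
# externally but targeting port 222 on the controller pods -
# presumably so Gitea SSH can also be served on port 22 without
# clashing with Gerrit SSH on the main Service.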
serviceGitea: kube.Service("ingress-nginx-gitea") {
metadata+: env.metadata,
target_pod:: env.deployment.spec.template,
spec+: {
type: "LoadBalancer",
loadBalancerIP: "185.236.240.60",
ports: [
{ name: "ssh", port: 22, targetPort: 222, protocol: "TCP" },
{ name: "http", port: 80, targetPort: 80, protocol: "TCP" },
{ name: "https", port: 443, targetPort: 443, protocol: "TCP" },
],
},
},
deployment: kube.Deployment("nginx-ingress-controller") {
metadata+: env.metadata,
spec+: {
replicas: 5,
template+: {
spec+: {
serviceAccountName: env.sa.metadata.name,
containers_: {
controller: kube.Container("nginx-ingress-controller") {
image: cfg.image,
imagePullPolicy: "IfNotPresent",
lifecycle: {
preStop: {
exec: {
command: [ "/wait-shutdown" ],
},
},
},
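# Flags mirror the upstream deployment: leader election id,
# ingress class, and references to the ConfigMaps defined above.
# --publish-service makes the controller publish this Service's
# external address on the Ingresses it manages.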
args: [
"/nginx-ingress-controller",
"--election-id=ingress-controller-leader",
"--ingress-class=nginx",
"--configmap=%s/%s" % [cfg.namespace, env.maps.configuration.metadata.name],
"--tcp-services-configmap=%s/%s" % [cfg.namespace, env.maps.tcp.metadata.name],
"--udp-services-configmap=%s/%s" % [cfg.namespace, env.maps.udp.metadata.name],
"--publish-service=%s/%s" % [cfg.namespace, env.service.metadata.name],
"--annotations-prefix=nginx.ingress.kubernetes.io",
],
env_: {
POD_NAME: kube.FieldRef("metadata.name"),
POD_NAMESPACE: kube.FieldRef("metadata.namespace"),
},
ports_: {
http: { containerPort: 80 },
https: { containerPort: 443 },
},
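# 10254 is the controller's built-in health/status endpoint; it
# is probed directly and not exposed through the Service.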
livenessProbe: {
failureThreshold: 3,
httpGet: {
path: "/healthz",
port: 10254,
scheme: "HTTP",
},
initialDelaySeconds: 10,
periodSeconds: 10,
successThreshold: 1,
timeoutSeconds: 10,
},
readinessProbe: {
failureThreshold: 3,
httpGet: {
path: "/healthz",
port: 10254,
scheme: "HTTP",
},
periodSeconds: 10,
successThreshold: 1,
timeoutSeconds: 10,
},
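# The controller runs as uid 101 (www-data in the upstream
# image) and needs NET_BIND_SERVICE to bind ports 80/443;
# allowPrivilegeEscalation lets the setcap'd
# /nginx-ingress-controller binary gain that capability.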
securityContext: {
allowPrivilegeEscalation: true,
capabilities: {
drop: ["ALL"],
add: ["NET_BIND_SERVICE"],
},
runAsUser: 101,
},
resources: {
limits: { cpu: "2", memory: "4G" },
requests: { cpu: "1", memory: "1G" },
},
},
},
},
},
},
},
},
}