| local kube = import "../../../kube/kube.libsonnet"; |
| |
| { |
| local app = self, |
| local cfg = app.cfg, |
    // Configuration for this coturn instance. Required fields default to
    // `error` so that forgetting to set one fails at evaluation time.
    cfg:: {
        image: error "cfg.image must be set",
        realm: error "cfg.realm must be set",
        authSecret: error "cfg.authSecret must be set",
        storageClassName: error "cfg.storageClassName must be set",

        // Inclusive relay port range: passed to turnserver as
        // --min-port/--max-port and exposed per-port on the pod and Services.
        portStart: 49152,
        portEnd: 49172,
        // Optional fixed LoadBalancer IP shared by the TCP and UDP Services;
        // when set it is also handed to turnserver as its external (-X) address.
        loadBalancerIP: null,
    },
| |
    // Namespace object; must be provided by the consumer. Every resource
    // below is wrapped in app.ns.Contain(...) to place it in this namespace.
    ns:: error "ns needs to be provided",
| |
    // turnserver configuration file, mounted into the container at
    // /config/coturn.conf. The relay is deliberately locked down: TCP
    // relaying and (D)TLS are disabled, and private/special-purpose peer
    // ranges are denied so the TURN relay cannot be used as a pivot into
    // the internal network.
    configMap: app.ns.Contain(kube.ConfigMap("coturn")) {
        data: {
            "coturn.conf": |||
                # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
                no-tcp-relay

                no-tls
                no-dtls

                # don't let the relay ever try to connect to private IP address ranges within your network (if any)
                # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
                denied-peer-ip=10.0.0.0-10.255.255.255
                denied-peer-ip=192.168.0.0-192.168.255.255
                denied-peer-ip=172.16.0.0-172.31.255.255

                # recommended additional local peers to block, to mitigate external access to internal services.
                # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
                no-multicast-peers
                denied-peer-ip=0.0.0.0-0.255.255.255
                denied-peer-ip=100.64.0.0-100.127.255.255
                denied-peer-ip=127.0.0.0-127.255.255.255
                denied-peer-ip=169.254.0.0-169.254.255.255
                denied-peer-ip=192.0.0.0-192.0.0.255
                denied-peer-ip=192.0.2.0-192.0.2.255
                denied-peer-ip=192.88.99.0-192.88.99.255
                denied-peer-ip=198.18.0.0-198.19.255.255
                denied-peer-ip=198.51.100.0-198.51.100.255
                denied-peer-ip=203.0.113.0-203.0.113.255
                denied-peer-ip=240.0.0.0-255.255.255.255

                # special case the turn server itself so that client->TURN->TURN->client flows work
                # this should be one of the turn server's listening IPs
                # FIXME allowed-peer-ip=10.0.0.1

                # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
                user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
                total-quota=1200

                use-auth-secret
            |||,
        },
    },
| |
| dataVolume: app.ns.Contain(kube.PersistentVolumeClaim("coturn-data")) { |
| spec+: { |
| storageClassName: cfg.storageClassName, |
| resources: { |
| requests: { |
| storage: "10Gi", |
| }, |
| }, |
| }, |
| }, |
| |
| deployment: app.ns.Contain(kube.Deployment("coturn")) { |
| spec+: { |
| replicas: 1, |
| template+: { |
| spec+: { |
| volumes_: { |
| config: kube.ConfigMapVolume(app.configMap), |
| data: kube.PersistentVolumeClaimVolume(app.dataVolume), |
| }, |
| containers_: { |
| coturn: kube.Container("coturn") { |
| image: cfg.image, |
| ports_: { |
| turn: { containerPort: 3478 }, |
| } + { |
| ["fwd-%d" % [n]]: { containerPort: n } |
| for n in std.range(cfg.portStart, cfg.portEnd) |
| }, |
| |
| command: [ |
| # This disgusting hack comes from the fact that |
| # official coturn containers have turnserver |
| # binary set up with CAP_NET_BIND_SERVICE=+ep, |
| # while there's really no use that in our case. |
| # |
| # Due to our PSP we can't exec said binary, nor |
| # can we chmod/chown/setcap on it, as we are |
| # running as an unprivileged user. |
| # |
| # Copying it over is the easiest method of |
| # stripping said spurious cap. |
| "/bin/sh", "-c", |
| "cp /usr/bin/turnserver /tmp/turnserver && \\ |
| exec /tmp/turnserver \\ |
| -c /config/coturn.conf \\ |
| --log-binding \\ |
| --realm=$(COTURN_REALM) \\ |
| --static-auth-secret=$(COTRN_STATIC_AUTH_SECRET) \\ |
| --min-port $(COTURN_MIN_PORT) \\ |
| --max-port $(COTURN_MAX_PORT) \\ |
| " + if cfg.loadBalancerIP != null then "-X $(COTURN_EXTERNAL_IP)" else "", |
| ], |
| volumeMounts_: { |
| config: { mountPath: "/config" }, |
| data: { mountPath: "/var/lib/coturn" }, |
| }, |
| env_: { |
| COTURN_REALM: cfg.realm, |
| COTURN_STATIC_AUTH_SECRET: cfg.authSecret, |
| COTURN_EXTERNAL_IP: cfg.loadBalancerIP, |
| COTURN_MIN_PORT: cfg.portStart, |
| COTURN_MAX_PORT: cfg.portEnd, |
| }, |
| }, |
| }, |
| securityContext: { |
| runAsUser: 1000, |
| runAsGroup: 1000, |
| fsGroup: 2000, |
| }, |
| }, |
| }, |
| }, |
| }, |
| |
| svcTCP: app.ns.Contain(kube.Service("coturn-tcp")) { |
| target_pod:: app.deployment.spec.template, |
| metadata+: { |
| annotations+: { |
| "metallb.universe.tf/allow-shared-ip": "coturn", |
| }, |
| }, |
| spec+: { |
| type: "LoadBalancer", |
| loadBalancerIP: cfg.loadBalancerIP, |
| externalTrafficPolicy: "Local", |
| ports: [ |
| { name: "turn", port: 3478, protocol: "TCP" }, |
| ] + [ |
| { name: "fwd-%d" % [n], port: n, protocol: "TCP" } |
| for n in std.range(cfg.portStart, cfg.portEnd) |
| ], |
| }, |
| }, |
| |
| svcUDP: app.ns.Contain(kube.Service("coturn-udp")) { |
| target_pod:: app.deployment.spec.template, |
| metadata+: { |
| annotations+: { |
| "metallb.universe.tf/allow-shared-ip": "coturn", |
| }, |
| }, |
| spec+: { |
| type: "LoadBalancer", |
| loadBalancerIP: cfg.loadBalancerIP, |
| externalTrafficPolicy: "Local", |
| ports: [ |
| { name: "turn", port: 3478, protocol: "UDP" }, |
| ] + [ |
| { name: "fwd-%d" % [n], port: n, protocol: "UDP" } |
| for n in std.range(cfg.portStart, cfg.portEnd) |
| ], |
| }, |
| }, |
| } |