// k0.hswaw.net kubernetes cluster
// This defines the cluster as a single object.
// Use the sibling k0*.jsonnet 'view' files to actually apply the configuration.

local kube = import "../../kube/kube.libsonnet";
local policies = import "../../kube/policies.libsonnet";

local cluster = import "cluster.libsonnet";

local cockroachdb = import "lib/cockroachdb.libsonnet";
local registry = import "lib/registry.libsonnet";
local rook = import "lib/rook.libsonnet";

{
    k0: {
        local k0 = self,
        cluster: cluster.Cluster("k0", "hswaw.net") {
            cfg+: {
                storageClassNameParanoid: k0.ceph.waw3Pools.blockRedundant.name,
            },
            metallb+: {
                cfg+: {
                    // Peer with calico running on the same node.
                    peers: [
                        {
                            "peer-address": "127.0.0.1",
                            "peer-asn": 65003,
                            "my-asn": 65002,
                        },
                    ],
                    // Public IP address pools. Keep in sync with k0.calico.yaml.
                    addressPools: [
                        {
                            name: "public-v4-1",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.48/28",
                            ],
                        },
                        {
                            name: "public-v4-2",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.112/28",
                            ],
                        },
                    ],
                },
            },
        },
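        // The MetalLB peer above talks BGP to the Calico instance on the
        // same node (Calico as remote AS 65003, MetalLB as AS 65002). The
        // Calico side of that session lives in k0.calico.yaml, not in this
        // tree; a minimal sketch of what it is assumed to look like, using
        // the upstream projectcalico.org/v3 BGPPeer schema (resource name
        // here is hypothetical):
        //
        //   apiVersion: projectcalico.org/v3
        //   kind: BGPPeer
        //   metadata:
        //     name: metallb
        //   spec:
        //     peerIP: 127.0.0.1
        //     asNumber: 65002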

        // Docker registry, served at registry.<cluster fqdn> (i.e.
        // registry.k0.hswaw.net), backed by Ceph object storage.
        registry: registry.Environment {
            cfg+: {
                domain: "registry.%s" % [k0.cluster.fqdn],
                storageClassName: k0.cluster.cfg.storageClassNameParanoid,
                objectStorageName: "waw-hdd-redundant-3-object",
            },
        },

        // CockroachDB, running on bc01n{01,02} and dcr01s22.
        cockroach: {
            // Note: the field is called waw2, but the cluster itself is
            // named crdb-waw1; the mismatch is historical.
            waw2: cockroachdb.Cluster("crdb-waw1") {
                cfg+: {
                    topology: [
                        { name: "bc01n01", node: "bc01n01.hswaw.net" },
                        { name: "bc01n02", node: "bc01n02.hswaw.net" },
                        { name: "dcr01s22", node: "dcr01s22.hswaw.net" },
                    ],
                    // Host path on SSD.
                    hostPath: "/var/db/crdb-waw1",
                    extraDNS: [
                        "crdb-waw1.hswaw.net",
                    ],
                },
            },
            clients: {
                cccampix: k0.cockroach.waw2.Client("cccampix"),
                cccampixDev: k0.cockroach.waw2.Client("cccampix-dev"),
                buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
                sso: k0.cockroach.waw2.Client("sso"),
                herpDev: k0.cockroach.waw2.Client("herp-dev"),
                gitea: k0.cockroach.waw2.Client("gitea"),
            },
        },
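        // Each Client() above provisions per-service access to crdb-waw1
        // (presumably a SQL user plus client TLS certificates, per standard
        // CockroachDB secure-mode practice). Consumers then speak the
        // postgres wire protocol on the default CockroachDB port; a sketch
        // of a resulting DSN (certificate paths and database name depend on
        // the consuming deployment):
        //
        //   postgresql://sso@crdb-waw1.hswaw.net:26257/sso?sslmode=verify-full
        //
        // Adding access for a new service is a one-liner in clients above,
        // e.g. (hypothetical): someApp: k0.cockroach.waw2.Client("some-app"),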

        ceph: {
            // waw1 cluster - dead as of 2019/08/06, data corruption
            // waw2 cluster - dead as of 2021/01/22, torn down (horrible M610 RAID controllers are horrible)

            // waw3: 6TB SAS 3.5" HDDs
            waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
                spec: {
                    mon: {
                        count: 1,
                        allowMultiplePerNode: false,
                    },
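                    // Note: mon.count of 1 above means a single Ceph
                    // monitor, i.e. no mon quorum redundancy; losing that
                    // mon's store means losing the cluster map with it.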
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "dcr01s22.hswaw.net",
                                location: "rack=dcr01 host=dcr01s22",
                                devices: [
                                    // https://github.com/rook/rook/issues/1228
                                    //{ name: "disk/by-id/wwan-0x" + wwan }
                                    //for wwan in [
                                    //    "5000c5008508c433",
                                    //    "5000c500850989cf",
                                    //    "5000c5008508f843",
                                    //    "5000c5008508baf7",
                                    //]
                                    { name: "sdn" },
                                    { name: "sda" },
                                    { name: "sdb" },
                                    { name: "sdc" },
                                ],
                            },
                            {
                                name: "dcr01s24.hswaw.net",
                                location: "rack=dcr01 host=dcr01s24",
                                devices: [
                                    // https://github.com/rook/rook/issues/1228
                                    //{ name: "disk/by-id/wwan-0x" + wwan }
                                    //for wwan in [
                                    //    "5000c5008508ee03",
                                    //    "5000c5008508c9ef",
                                    //    "5000c5008508df33",
                                    //    "5000c5008508dd3b",
                                    //]
                                    { name: "sdm" },
                                    { name: "sda" },
                                    { name: "sdb" },
                                    { name: "sdc" },
                                ],
                            },
                        ],
                    },
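                    // benji:: is a hidden field (double colon), so it is not
                    // serialized into the CephCluster object; it carries
                    // configuration for Benji, a Ceph RBD backup tool,
                    // presumably consumed by lib/rook.libsonnet. The
                    // std.split(importstr ..., '\n')[0] dance below just
                    // strips the trailing newline from the plaintext secret
                    // files.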
                    benji:: {
                        metadataStorageClass: "waw-hdd-redundant-3",
                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                        pools: [
                            "waw-hdd-redundant-3",
                            "waw-hdd-redundant-3-metadata",
                            "waw-hdd-yolo-3",
                        ],
                        s3Configuration: {
                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                            bucketName: "benji-k0-backups-waw3",
                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                        },
                    },
                },
            },
            waw3Pools: {
                // redundant block storage
                blockRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                // yolo block storage (low usage, no host redundancy)
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw3, "waw-hdd-yolo-3") {
                    spec: {
                        failureDomain: "osd",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
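                // Note on the erasure-coded pool above: with k=2 data chunks
                // and m=1 coding chunk, objects are stored at (k+m)/k = 1.5x
                // raw overhead (vs 2x for the replicated size=2 pools here),
                // at the cost of tolerating only a single failure in its
                // failure domain - here, one OSD.
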
                // q3k's personal pool, used externally from k8s.
                q3kRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-q3k-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw3, "waw-hdd-redundant-3-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                    },
                },
            },

            // Clients for S3/radosgw storage.
            clients: {
                # Used for owncloud.hackerspace.pl, which for now lives on boston-packets.hackerspace.pl.
                nextcloudWaw3: kube.CephObjectStoreUser("nextcloud") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nextcloud",
                    },
                },
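                # With stock Rook behaviour (not pinned down in this file),
                # each CephObjectStoreUser here yields a Secret named
                # rook-ceph-object-user-<store>-<user> (keys AccessKey /
                # SecretKey) in the object store's namespace; that is what
                # the owning service mounts to talk S3.
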
                # issues.hackerspace.pl (redmine) attachments bucket
                issuesWaw3: kube.CephObjectStoreUser("issues") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "issues",
                    },
                },

                # nuke@hackerspace.pl's personal storage.
                nukePersonalWaw3: kube.CephObjectStoreUser("nuke-personal") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nuke-personal",
                    },
                },

                # patryk@hackerspace.pl's ArmA3 mod bucket.
                cz2ArmaModsWaw3: kube.CephObjectStoreUser("cz2-arma3mods") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz2-arma3mods",
                    },
                },
                # Buckets for spark pipelines
                # TODO(implr): consider a second yolo-backed one for temp data
                implrSparkWaw3: kube.CephObjectStoreUser("implr-spark") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "implr-spark",
                    },
                },
                # q3k's personal user
                q3kWaw3: kube.CephObjectStoreUser("q3k") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "q3k",
                    },
                },
                # woju's personal user
                wojuWaw3: kube.CephObjectStoreUser("woju") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "woju",
                    },
                },
                # cz3's (patryk@hackerspace.pl) personal user
                cz3Waw3: kube.CephObjectStoreUser("cz3") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz3",
                    },
                },
            },
        },

        # These are policies allowing for Insecure pods in some namespaces.
        # A lot of them are spurious and come from the fact that we deployed
        # these namespaces before we deployed the draconian PodSecurityPolicy
        # we have now. This should be fixed by setting up some more granular
        # policies, or by fixing the workloads to not need some of the
        # permission bits they use, whatever those might be.
        # TODO(q3k): fix this?
        unnecessarilyInsecureNamespaces: [
            policies.AllowNamespaceInsecure("ceph-waw3"),
            policies.AllowNamespaceInsecure("matrix"),
            policies.AllowNamespaceInsecure("registry"),
            policies.AllowNamespaceInsecure("internet"),
            # TODO(implr): restricted policy with CAP_NET_ADMIN and tuntap,
            # but no full root; see the sketch after this list.
            policies.AllowNamespaceInsecure("implr-vpn"),
        ],
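
        # A more granular replacement for e.g. implr-vpn could grant just
        # the capability bits a workload needs instead of full Insecure.
        # Sketch only: the AllowNamespaceCustom helper below is hypothetical
        # and does not exist in policies.libsonnet today.
        #
        #   policies.AllowNamespaceCustom("implr-vpn", {
        #       allowedCapabilities: ["NET_ADMIN"],
        #   }),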
    },
}