blob: aeb1d35291362a372b1c48d71423564f9d6af6cb [file] [log] [blame]
// k0.hswaw.net kubernetes cluster
// This defines the cluster as a single object.
// Use the sibling k0*.jsonnet 'view' files to actually apply the configuration.
4
5local kube = import "../../kube/kube.libsonnet";
6local policies = import "../../kube/policies.libsonnet";
7
8local cluster = import "cluster.libsonnet";
9
10local cockroachdb = import "lib/cockroachdb.libsonnet";
11local registry = import "lib/registry.libsonnet";
12local rook = import "lib/rook.libsonnet";
13
14{
15 k0: {
16 local k0 = self,
        // Core cluster definition; everything else on k0 hangs off this.
        cluster: cluster.Cluster("k0", "hswaw.net") {
            cfg+: {
                // Default 'paranoid' storage class, backed by the redundant
                // Ceph block pool on ceph-waw3 (defined below).
                storageClassNameParanoid: k0.ceph.waw3Pools.blockRedundant.name,
            },
            metallb+: {
                cfg+: {
                    // Peer with calico running on same node.
                    peers: [
                        {
                            "peer-address": "127.0.0.1",
                            "peer-asn": 65003,
                            "my-asn": 65002,
                        },
                    ],
                    // Public IP address pools. Keep in sync with k0.calico.yaml.
                    addressPools: [
                        {
                            name: "public-v4-1",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.48/28",
                            ],
                        },
                        {
                            name: "public-v4-2",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.112/28"
                            ],
                        },
                    ],
                },
            },
        },
51
52 // Docker registry
        // Docker registry
        registry: registry.Environment {
            cfg+: {
                domain: "registry.%s" % [k0.cluster.fqdn],
                storageClassName: k0.cluster.cfg.storageClassNameParanoid,
                // Image blobs go to the ceph-waw3 redundant object store.
                objectStorageName: "waw-hdd-redundant-3-object",
            },
        },
60
61 // CockroachDB, running on bc01n{01,02,03}.
        // CockroachDB, running on bc01n{01,02,03}.
        cockroach: {
            // NOTE(review): the field is named 'waw2' but the cluster itself
            // is "crdb-waw1" — presumably historical; both names are
            // load-bearing (k0.cockroach.waw2 below, DNS), confirm before
            // any rename.
            waw2: cockroachdb.Cluster("crdb-waw1") {
                cfg+: {
                    // One CockroachDB node per physical machine.
                    topology: [
                        { name: "bc01n01", node: "bc01n01.hswaw.net" },
                        { name: "bc01n02", node: "bc01n02.hswaw.net" },
                        { name: "bc01n03", node: "bc01n03.hswaw.net" },
                    ],
                    // Host path on SSD.
                    hostPath: "/var/db/crdb-waw1",
                    // Additional DNS names for the cluster (semantics defined
                    // in lib/cockroachdb.libsonnet).
                    extraDNS: [
                        "crdb-waw1.hswaw.net",
                    ],
                },
            },
            // Per-application clients (users/certificates) on crdb-waw1.
            clients: {
                cccampix: k0.cockroach.waw2.Client("cccampix"),
                cccampixDev: k0.cockroach.waw2.Client("cccampix-dev"),
                buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
                sso: k0.cockroach.waw2.Client("sso"),
                herpDev: k0.cockroach.waw2.Client("herp-dev"),
            },
        },
85
86 ceph: {
87 // waw1 cluster - dead as of 2019/08/06, data corruption
88 // waw2 cluster: shitty 7200RPM 2.5" HDDs
            waw2: rook.Cluster(k0.cluster.rook, "ceph-waw2") {
                spec: {
                    // Three monitors across distinct nodes for quorum.
                    mon: {
                        count: 3,
                        allowMultiplePerNode: false,
                    },
                    storage: {
                        // Explicit node/device list — never auto-consume disks.
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "bc01n01.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n01",
                                devices: [ { name: "sda" } ],
                            },
                            {
                                name: "bc01n02.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n02",
                                devices: [ { name: "sda" } ],
                            },
                        ],
                    },
                    // Benji backup configuration (hidden field — presumably
                    // consumed by lib/rook.libsonnet tooling; confirm there).
                    benji:: {
                        metadataStorageClass: "waw-hdd-paranoid-2",
                        // First line of the plaintext secret file.
                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                        // RBD pools to back up.
                        pools: [
                            "waw-hdd-redundant-2",
                            "waw-hdd-redundant-2-metadata",
                            "waw-hdd-paranoid-2",
                            "waw-hdd-yolo-2",
                        ],
                        // Off-site backup target (Wasabi S3).
                        s3Configuration: {
                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                            bucketName: "benji-k0-backups",
                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                        },
                    }
                },
            },
            waw2Pools: {
                // redundant block storage (2+1 erasure coding, host failure domain)
                blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
                    spec: {
                        failureDomain: "host",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
                // paranoid block storage (3 replicas)
                blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 3,
                        },
                    },
                },
                // yolo block storage (no replicas!)
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 1,
                        },
                    },
                },
                // S3-compatible object store (radosgw): replicated metadata,
                // erasure-coded data.
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 3 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            erasureCoded: {
                                dataChunks: 2,
                                codingChunks: 1,
                            },
                        },
                    },
                },
            },
178
179 // waw3: 6TB SAS 3.5" HDDs
180 waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
181 spec: {
182 mon: {
Serge Bazanskicf842b02021-01-19 20:08:23 +0100183 count: 1,
Sergiusz Bazanskidbfa9882020-06-06 01:21:45 +0200184 allowMultiplePerNode: false,
185 },
186 storage: {
187 useAllNodes: false,
188 useAllDevices: false,
189 config: {
190 databaseSizeMB: "1024",
191 journalSizeMB: "1024",
192 },
193 nodes: [
194 {
195 name: "dcr01s22.hswaw.net",
196 location: "rack=dcr01 host=dcr01s22",
197 devices: [
198 // https://github.com/rook/rook/issues/1228
199 //{ name: "disk/by-id/wwan-0x" + wwan }
200 //for wwan in [
201 // "5000c5008508c433",
202 // "5000c500850989cf",
203 // "5000c5008508f843",
204 // "5000c5008508baf7",
205 //]
206 { name: "sdn" },
207 { name: "sda" },
208 { name: "sdb" },
209 { name: "sdc" },
210 ],
211 },
212 {
213 name: "dcr01s24.hswaw.net",
214 location: "rack=dcr01 host=dcr01s22",
215 devices: [
216 // https://github.com/rook/rook/issues/1228
217 //{ name: "disk/by-id/wwan-0x" + wwan }
218 //for wwan in [
219 // "5000c5008508ee03",
220 // "5000c5008508c9ef",
221 // "5000c5008508df33",
222 // "5000c5008508dd3b",
223 //]
224 { name: "sdm" },
225 { name: "sda" },
226 { name: "sdb" },
227 { name: "sdc" },
228 ],
229 },
230 ],
231 },
232 benji:: {
233 metadataStorageClass: "waw-hdd-redundant-3",
234 encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
235 pools: [
236 "waw-hdd-redundant-3",
237 "waw-hdd-redundant-3-metadata",
238 "waw-hdd-yolo-3",
239 ],
240 s3Configuration: {
241 awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
242 awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
243 bucketName: "benji-k0-backups-waw3",
244 endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
245 },
246 }
247 },
248 },
            waw3Pools: {
                // redundant block storage
                // NOTE(review): constructed via ECBlockPool yet the spec uses
                // 'replicated' (size 2) with metadataReplicas — the wrapper
                // presumably also creates a metadata pool; verify against
                // lib/rook.libsonnet before touching.
                blockRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                // yolo block storage (low usage, no host redundancy)
                // NOTE(review): ReplicatedBlockPool with an 'erasureCoded'
                // spec — naming/spec mismatch; confirm the intended pool type
                // in lib/rook.libsonnet.
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw3, "waw-hdd-yolo-3") {
                    spec: {
                        failureDomain: "osd",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
                // q3k's personal pool, used externally from k8s.
                q3kRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-q3k-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                // S3-compatible object store (radosgw), 2x replicated pools.
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw3, "waw-hdd-redundant-3-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                    },
                },
            },
293
294 // Clients for S3/radosgw storage.
295 clients: {
296 # Used for owncloud.hackerspace.pl, which for now lives on boston-packets.hackerspace.pl.
297 nextcloudWaw3: kube.CephObjectStoreUser("nextcloud") {
298 metadata+: {
299 namespace: "ceph-waw3",
300 },
301 spec: {
302 store: "waw-hdd-redundant-3-object",
303 displayName: "nextcloud",
304 },
305 },
306
307 # nuke@hackerspace.pl's personal storage.
308 nukePersonalWaw3: kube.CephObjectStoreUser("nuke-personal") {
309 metadata+: {
310 namespace: "ceph-waw3",
311 },
312 spec: {
313 store: "waw-hdd-redundant-3-object",
314 displayName: "nuke-personal",
315 },
316 },
317
318 # patryk@hackerspace.pl's ArmA3 mod bucket.
319 cz2ArmaModsWaw3: kube.CephObjectStoreUser("cz2-arma3mods") {
320 metadata+: {
321 namespace: "ceph-waw3",
322 },
323 spec: {
324 store: "waw-hdd-redundant-3-object",
325 displayName: "cz2-arma3mods",
326 },
327 },
Bartosz Stebeld9df5872020-06-13 21:19:40 +0200328 # Buckets for spark pipelines
329 # TODO(implr): consider a second yolo-backed one for temp data
330 implrSparkWaw3: kube.CephObjectStoreUser("implr-spark") {
331 metadata+: {
332 namespace: "ceph-waw3",
333 },
334 spec: {
335 store: "waw-hdd-redundant-3-object",
336 displayName: "implr-spark",
337 },
338 },
Sergiusz Bazanskib1aadd82020-06-24 19:06:17 +0200339 # q3k's personal user
340 q3kWaw3: kube.CephObjectStoreUser("q3k") {
341 metadata+: {
342 namespace: "ceph-waw3",
343 },
344 spec: {
345 store: "waw-hdd-redundant-3-object",
346 displayName: "q3k",
347 },
348 },
Serge Bazanskibfe9bb02020-10-27 20:50:50 +0100349 # woju's personal user
350 wojuWaw3: kube.CephObjectStoreUser("woju") {
351 metadata+: {
352 namespace: "ceph-waw3",
353 },
354 spec: {
355 store: "waw-hdd-redundant-3-object",
356 displayName: "woju",
357 },
Patryk Jakuszewcae7cf72020-11-28 14:36:48 +0100358 },
Patryk Jakuszew34668a52020-11-28 13:45:25 +0100359 # cz3's (patryk@hackerspace.pl) personal user
360 cz3Waw3: kube.CephObjectStoreUser("cz3") {
361 metadata+: {
362 namespace: "ceph-waw3",
363 },
364 spec: {
365 store: "waw-hdd-redundant-3-object",
366 displayName: "cz3",
367 },
Serge Bazanskibfe9bb02020-10-27 20:50:50 +0100368 },
Sergiusz Bazanskidbfa9882020-06-06 01:21:45 +0200369 },
370 },
371
372
373 # These are policies allowing for Insecure pods in some namespaces.
374 # A lot of them are spurious and come from the fact that we deployed
375 # these namespaces before we deployed the draconian PodSecurityPolicy
376 # we have now. This should be fixed by setting up some more granular
377 # policies, or fixing the workloads to not need some of the permission
378 # bits they use, whatever those might be.
379 # TODO(q3k): fix this?
380 unnecessarilyInsecureNamespaces: [
381 policies.AllowNamespaceInsecure("ceph-waw2"),
382 policies.AllowNamespaceInsecure("ceph-waw3"),
383 policies.AllowNamespaceInsecure("matrix"),
384 policies.AllowNamespaceInsecure("registry"),
385 policies.AllowNamespaceInsecure("internet"),
386 # TODO(implr): restricted policy with CAP_NET_ADMIN and tuntap, but no full root
387 policies.AllowNamespaceInsecure("implr-vpn"),
388 ],
389 },
390}