// k0.hswaw.net kubernetes cluster
// This defines the cluster as a single object.
// Use the sibling k0*.jsonnet 'view' files to actually apply the configuration.

local kube = import "../../kube/kube.libsonnet";
local policies = import "../../kube/policies.libsonnet";

local cluster = import "cluster.libsonnet";

local cockroachdb = import "lib/cockroachdb.libsonnet";
local registry = import "lib/registry.libsonnet";
local rook = import "lib/rook.libsonnet";

{
    k0: {
        local k0 = self,
        cluster: cluster.Cluster("k0", "hswaw.net") {
            cfg+: {
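                // The cluster-wide 'paranoid' storage class is pointed at the
                // waw3 redundant block pool defined in waw3Pools below.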
                storageClassNameParanoid: k0.ceph.waw3Pools.blockRedundant.name,
            },
            metallb+: {
                cfg+: {
                    // Peer with calico running on same node.
                    peers: [
                        {
                            "peer-address": "127.0.0.1",
                            "peer-asn": 65003,
                            "my-asn": 65002,
                        },
                    ],
                    // Public IP address pools. Keep in sync with k0.calico.yaml.
                    addressPools: [
                        {
                            name: "public-v4-1",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.48/28",
                            ],
                        },
                        {
                            name: "public-v4-2",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.112/28",
                            ],
                        },
                    ],
                },
            },
        },

        // Docker registry
        registry: registry.Environment {
            cfg+: {
                domain: "registry.%s" % [k0.cluster.fqdn],
                storageClassName: k0.cluster.cfg.storageClassNameParanoid,
                objectStorageName: "waw-hdd-redundant-3-object",
            },
        },

        // CockroachDB, running on bc01n{01,02,03}.
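        // Note: the field is named waw2 while the cluster it creates is
        // named crdb-waw1 (likely a historical artifact); it is kept as-is
        // because the clients below reference k0.cockroach.waw2.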
        cockroach: {
            waw2: cockroachdb.Cluster("crdb-waw1") {
                cfg+: {
                    topology: [
                        { name: "bc01n01", node: "bc01n01.hswaw.net" },
                        { name: "bc01n02", node: "bc01n02.hswaw.net" },
                        { name: "bc01n03", node: "bc01n03.hswaw.net" },
                    ],
                    // Host path on SSD.
                    hostPath: "/var/db/crdb-waw1",
                    extraDNS: [
                        "crdb-waw1.hswaw.net",
                    ],
                },
            },
            clients: {
                cccampix: k0.cockroach.waw2.Client("cccampix"),
                cccampixDev: k0.cockroach.waw2.Client("cccampix-dev"),
                buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
                sso: k0.cockroach.waw2.Client("sso"),
                herpDev: k0.cockroach.waw2.Client("herp-dev"),
            },
        },

        ceph: {
            // waw1 cluster - dead as of 2019/08/06, data corruption
            // waw2 cluster: shitty 7200RPM 2.5" HDDs
            waw2: rook.Cluster(k0.cluster.rook, "ceph-waw2") {
                spec: {
                    mon: {
                        count: 3,
                        allowMultiplePerNode: false,
                    },
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "bc01n01.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n01",
                                devices: [ { name: "sda" } ],
                            },
                            {
                                name: "bc01n02.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n02",
                                devices: [ { name: "sda" } ],
                            },
                            {
                                name: "bc01n03.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n03",
                                devices: [ { name: "sda" } ],
                            },
                        ],
                    },
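                    // Benji backup configuration. Note the '::': this is a
                    // jsonnet hidden field, so it is not rendered into the
                    // CephCluster manifest itself; it is presumably consumed
                    // by the Benji backup machinery in rook.libsonnet.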
                    benji:: {
                        metadataStorageClass: "waw-hdd-paranoid-2",
                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                        pools: [
                            "waw-hdd-redundant-2",
                            "waw-hdd-redundant-2-metadata",
                            "waw-hdd-paranoid-2",
                            "waw-hdd-yolo-2",
                        ],
                        s3Configuration: {
                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                            bucketName: "benji-k0-backups",
                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                        },
                    },
                },
            },
            waw2Pools: {
                // redundant block storage
                blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
                    spec: {
                        failureDomain: "host",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
                // paranoid block storage (3 replicas)
                blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 3,
                        },
                    },
                },
                // yolo block storage (no replicas!)
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 1,
                        },
                    },
                },
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 3 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            erasureCoded: {
                                dataChunks: 2,
                                codingChunks: 1,
                            },
                        },
                    },
                },
            },

            // waw3: 6TB SAS 3.5" HDDs
            waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
                spec: {
                    mon: {
                        count: 3,
                        allowMultiplePerNode: false,
                    },
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "dcr01s22.hswaw.net",
                                location: "rack=dcr01 host=dcr01s22",
                                devices: [
                                    // https://github.com/rook/rook/issues/1228
                                    //{ name: "disk/by-id/wwan-0x" + wwan }
                                    //for wwan in [
                                    //    "5000c5008508c433",
                                    //    "5000c500850989cf",
                                    //    "5000c5008508f843",
                                    //    "5000c5008508baf7",
                                    //]
                                    { name: "sdn" },
                                    { name: "sda" },
                                    { name: "sdb" },
                                    { name: "sdc" },
                                ],
                            },
                            {
                                name: "dcr01s24.hswaw.net",
219 location: "rack=dcr01 host=dcr01s22",
                                devices: [
                                    // https://github.com/rook/rook/issues/1228
                                    //{ name: "disk/by-id/wwan-0x" + wwan }
                                    //for wwan in [
                                    //    "5000c5008508ee03",
                                    //    "5000c5008508c9ef",
                                    //    "5000c5008508df33",
                                    //    "5000c5008508dd3b",
                                    //]
                                    { name: "sdm" },
                                    { name: "sda" },
                                    { name: "sdb" },
                                    { name: "sdc" },
                                ],
                            },
                        ],
                    },
                    benji:: {
                        metadataStorageClass: "waw-hdd-redundant-3",
                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                        pools: [
                            "waw-hdd-redundant-3",
                            "waw-hdd-redundant-3-metadata",
                            "waw-hdd-yolo-3",
                        ],
                        s3Configuration: {
                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                            bucketName: "benji-k0-backups-waw3",
                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                        },
                    },
                },
            },
            waw3Pools: {
                // redundant block storage
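                // Note: 'spec:' (without '+') below replaces any default spec
                // from the ECBlockPool template, so despite the constructor
                // name this pool is replicated (size 2).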
                blockRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                // yolo block storage (low usage, no host redundancy)
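                // Similarly, the spec override here turns this 'replicated'
                // pool template into an erasure-coded (12+4) pool whose
                // failure domain is individual OSDs, not hosts.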
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw3, "waw-hdd-yolo-3") {
                    spec: {
                        failureDomain: "osd",
                        erasureCoded: {
                            dataChunks: 12,
                            codingChunks: 4,
                        },
                    },
                },
                // q3k's personal pool, used externally from k8s.
                q3kRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-q3k-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw3, "waw-hdd-redundant-3-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                    },
                },
            },

            // Clients for S3/radosgw storage.
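            // Each CephObjectStoreUser creates a radosgw user in the given
            // object store; Rook materializes its access credentials as a
            // Secret in the same namespace.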
            clients: {
                # Used for owncloud.hackerspace.pl, which for now lives on boston-packets.hackerspace.pl.
                nextcloudWaw3: kube.CephObjectStoreUser("nextcloud") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nextcloud",
                    },
                },

                # nuke@hackerspace.pl's personal storage.
                nukePersonalWaw3: kube.CephObjectStoreUser("nuke-personal") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nuke-personal",
                    },
                },

                # patryk@hackerspace.pl's ArmA3 mod bucket.
                cz2ArmaModsWaw3: kube.CephObjectStoreUser("cz2-arma3mods") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz2-arma3mods",
                    },
                },

                # Buckets for spark pipelines
                # TODO(implr): consider a second yolo-backed one for temp data
                implrSparkWaw3: kube.CephObjectStoreUser("implr-spark") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "implr-spark",
                    },
                },

                # q3k's personal user
                q3kWaw3: kube.CephObjectStoreUser("q3k") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "q3k",
                    },
                },

                # woju's personal user
                wojuWaw3: kube.CephObjectStoreUser("woju") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "woju",
                    },
                },

                # cz3's (patryk@hackerspace.pl) personal user
                cz3Waw3: kube.CephObjectStoreUser("cz3") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz3",
                    },
                },
            },
        },

        # These are policies allowing for Insecure pods in some namespaces.
        # A lot of them are spurious and come from the fact that we deployed
        # these namespaces before we deployed the draconian PodSecurityPolicy
        # we have now. This should be fixed by setting up some more granular
        # policies, or fixing the workloads to not need some of the permission
        # bits they use, whatever those might be.
        # TODO(q3k): fix this?
        unnecessarilyInsecureNamespaces: [
            policies.AllowNamespaceInsecure("ceph-waw2"),
            policies.AllowNamespaceInsecure("ceph-waw3"),
            policies.AllowNamespaceInsecure("matrix"),
            policies.AllowNamespaceInsecure("registry"),
            policies.AllowNamespaceInsecure("internet"),
            # TODO(implr): restricted policy with CAP_NET_ADMIN and tuntap, but no full root
            policies.AllowNamespaceInsecure("implr-vpn"),
        ],
    },
}