// k0.hswaw.net kubernetes cluster
// This defines the cluster as a single object.
// Use the sibling k0*.jsonnet 'view' files to actually apply the configuration.

local kube = import "../../kube/kube.libsonnet";
local policies = import "../../kube/policies.libsonnet";

local cluster = import "cluster.libsonnet";

local cockroachdb = import "lib/cockroachdb.libsonnet";
local registry = import "lib/registry.libsonnet";
local rook = import "lib/rook.libsonnet";

{
    k0: {
        local k0 = self,
        cluster: cluster.Cluster("k0", "hswaw.net") {
            cfg+: {
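                // Storage class used by in-cluster components. Despite the
                // 'paranoid' name, this currently resolves to the waw3
                // redundant block pool's storage class.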
                storageClassNameParanoid: k0.ceph.waw3Pools.blockRedundant.name,
            },
            metallb+: {
                cfg+: {
                    // Peer with calico running on the same node.
                    peers: [
                        {
                            "peer-address": "127.0.0.1",
                            "peer-asn": 65003,
                            "my-asn": 65002,
                        },
                    ],
                    // Public IP address pools. Keep in sync with k0.calico.yaml.
                    addressPools: [
                        {
                            name: "public-v4-1",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.48/28",
                            ],
                        },
                        {
                            name: "public-v4-2",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.112/28",
                            ],
                        },
                    ],
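                    // A Service can request an address from a specific pool via
                    // the metallb.universe.tf/address-pool annotation, or ask for
                    // a concrete address via spec.loadBalancerIP.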
                },
            },
        },

        // Docker registry
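        // Served at registry.k0.hswaw.net; presumably backed by the storage
        // class above and the waw3 radosgw object store named below.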
        registry: registry.Environment {
            cfg+: {
                domain: "registry.%s" % [k0.cluster.fqdn],
                storageClassName: k0.cluster.cfg.storageClassNameParanoid,
                objectStorageName: "waw-hdd-redundant-3-object",
            },
        },

        // CockroachDB, running on bc01n{01,02,03}.
        cockroach: {
            waw2: cockroachdb.Cluster("crdb-waw1") {
                cfg+: {
                    topology: [
                        { name: "bc01n01", node: "bc01n01.hswaw.net" },
                        { name: "bc01n02", node: "bc01n02.hswaw.net" },
                        { name: "bc01n03", node: "bc01n03.hswaw.net" },
                    ],
                    // Host path on SSD.
                    hostPath: "/var/db/crdb-waw1",
                    extraDNS: [
                        "crdb-waw1.hswaw.net",
                    ],
                },
            },
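            // Per-application database clients. Each Client() call presumably
            // provisions a database user and client TLS certificates for that
            // application, e.g.: newApp: k0.cockroach.waw2.Client("new-app").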
            clients: {
                cccampix: k0.cockroach.waw2.Client("cccampix"),
                cccampixDev: k0.cockroach.waw2.Client("cccampix-dev"),
                buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
                sso: k0.cockroach.waw2.Client("sso"),
                herpDev: k0.cockroach.waw2.Client("herp-dev"),
            },
        },

        ceph: {
            // waw1 cluster - dead as of 2019/08/06, data corruption
            // waw2 cluster: shitty 7200RPM 2.5" HDDs
            waw2: rook.Cluster(k0.cluster.rook, "ceph-waw2") {
                spec: {
                    // This cluster is quite broken. We're just keeping it around
                    // for the hell of it.
                    continueUpgradeAfterChecksEvenIfNotHealthy: true,
                    mon: {
                        count: 3,
                        allowMultiplePerNode: false,
                    },
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
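                        // Per-node CRUSH locations; the "host" failure domains in
                        // the pools below rely on these host= entries being distinct.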
                        nodes: [
                            {
                                name: "bc01n01.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n01",
                                devices: [ { name: "sda" } ],
                            },
                            {
                                name: "bc01n02.hswaw.net",
                                location: "rack=dcr01 chassis=bc01 host=bc01n02",
                                devices: [ { name: "sda" } ],
                            },
                        ],
                    },
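                    // Benji backup configuration. The :: marker makes this field
                    // hidden, so it is not rendered as a Kubernetes object; it is
                    // presumably consumed by the backup machinery in rook.libsonnet.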
                    benji:: {
                        metadataStorageClass: "waw-hdd-paranoid-2",
                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                        pools: [
                            "waw-hdd-redundant-2",
                            "waw-hdd-redundant-2-metadata",
                            "waw-hdd-paranoid-2",
                            "waw-hdd-yolo-2",
                        ],
                        s3Configuration: {
                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                            bucketName: "benji-k0-backups",
                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                        },
                    },
                },
            },
            waw2Pools: {
                // redundant block storage
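                // Erasure coded 2+1: each object is split into 2 data chunks plus
                // 1 coding chunk, so raw overhead is 1.5x and the pool survives
                // the loss of any single host.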
                blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
                    spec: {
                        failureDomain: "host",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
                // paranoid block storage (3 replicas)
                blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 3,
                        },
                    },
                },
                // yolo block storage (no replicas!)
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 1,
                        },
                    },
                },
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 3 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            erasureCoded: {
                                dataChunks: 2,
                                codingChunks: 1,
                            },
                        },
                    },
                },
            },

            // waw3: 6TB SAS 3.5" HDDs
            waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
                spec: {
                    mon: {
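                        // A single monitor is cheap, but has no quorum redundancy:
                        // losing its data means losing the cluster map.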
                        count: 1,
                        allowMultiplePerNode: false,
                    },
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "dcr01s22.hswaw.net",
                                location: "rack=dcr01 host=dcr01s22",
                                devices: [
                                    // https://github.com/rook/rook/issues/1228
                                    //{ name: "disk/by-id/wwn-0x" + wwn }
                                    //for wwn in [
                                    //    "5000c5008508c433",
                                    //    "5000c500850989cf",
                                    //    "5000c5008508f843",
                                    //    "5000c5008508baf7",
                                    //]
                                    { name: "sdn" },
                                    { name: "sda" },
                                    { name: "sdb" },
                                    { name: "sdc" },
                                ],
                            },
                            {
                                name: "dcr01s24.hswaw.net",
                                location: "rack=dcr01 host=dcr01s24",
                                devices: [
                                    // https://github.com/rook/rook/issues/1228
                                    //{ name: "disk/by-id/wwn-0x" + wwn }
                                    //for wwn in [
                                    //    "5000c5008508ee03",
                                    //    "5000c5008508c9ef",
                                    //    "5000c5008508df33",
                                    //    "5000c5008508dd3b",
                                    //]
                                    { name: "sdm" },
                                    { name: "sda" },
                                    { name: "sdb" },
                                    { name: "sdc" },
                                ],
                            },
                        ],
                    },
                    benji:: {
                        metadataStorageClass: "waw-hdd-redundant-3",
                        encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                        pools: [
                            "waw-hdd-redundant-3",
                            "waw-hdd-redundant-3-metadata",
                            "waw-hdd-yolo-3",
                        ],
                        s3Configuration: {
                            awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                            awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                            bucketName: "benji-k0-backups-waw3",
                            endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                        },
                    },
                },
            },
            waw3Pools: {
                // redundant block storage
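                // Despite the ECBlockPool constructor, the spec below configures
                // plain 2-way replication (spec: overrides, rather than merges
                // with, the constructor's default).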
                blockRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                // yolo block storage (low usage, no host redundancy)
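                // The spec below uses 2+1 erasure coding at OSD failure domain:
                // data survives a single OSD loss, but chunks may land on the
                // same host, so a host loss can take data with it.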
                blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw3, "waw-hdd-yolo-3") {
                    spec: {
                        failureDomain: "osd",
                        erasureCoded: {
                            dataChunks: 2,
                            codingChunks: 1,
                        },
                    },
                },
                // q3k's personal pool, used externally from k8s.
                q3kRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-q3k-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                objectRedundant: rook.S3ObjectStore(k0.ceph.waw3, "waw-hdd-redundant-3-object") {
                    spec: {
                        metadataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                        dataPool: {
                            failureDomain: "host",
                            replicated: { size: 2 },
                        },
                    },
                },
            },

            // Clients for S3/radosgw storage.
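            // Rook materializes each CephObjectStoreUser into a Secret named
            // rook-ceph-object-user-<store>-<user> in the same namespace,
            // carrying that user's AccessKey/SecretKey pair.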
            clients: {
                # Used for owncloud.hackerspace.pl, which for now lives on boston-packets.hackerspace.pl.
                nextcloudWaw3: kube.CephObjectStoreUser("nextcloud") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nextcloud",
                    },
                },

                # nuke@hackerspace.pl's personal storage.
                nukePersonalWaw3: kube.CephObjectStoreUser("nuke-personal") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nuke-personal",
                    },
                },

                # patryk@hackerspace.pl's ArmA3 mod bucket.
                cz2ArmaModsWaw3: kube.CephObjectStoreUser("cz2-arma3mods") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz2-arma3mods",
                    },
                },

                # Buckets for spark pipelines.
                # TODO(implr): consider a second yolo-backed one for temp data
                implrSparkWaw3: kube.CephObjectStoreUser("implr-spark") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "implr-spark",
                    },
                },

                # q3k's personal user.
                q3kWaw3: kube.CephObjectStoreUser("q3k") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "q3k",
                    },
                },

                # woju's personal user.
                wojuWaw3: kube.CephObjectStoreUser("woju") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "woju",
                    },
                },

                # cz3's (patryk@hackerspace.pl) personal user.
                cz3Waw3: kube.CephObjectStoreUser("cz3") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz3",
                    },
                },
            },
        },

        # These are policies allowing for Insecure pods in some namespaces.
        # A lot of them are spurious and come from the fact that we deployed
        # these namespaces before we deployed the draconian PodSecurityPolicy
        # we have now. This should be fixed by setting up more granular
        # policies, or by fixing the workloads to not need some of the
        # permission bits they currently use.
        # TODO(q3k): fix this?
        unnecessarilyInsecureNamespaces: [
            policies.AllowNamespaceInsecure("ceph-waw2"),
            policies.AllowNamespaceInsecure("ceph-waw3"),
            policies.AllowNamespaceInsecure("matrix"),
            policies.AllowNamespaceInsecure("registry"),
            policies.AllowNamespaceInsecure("internet"),
            # TODO(implr): restricted policy with CAP_NET_ADMIN and tuntap, but no full root
            policies.AllowNamespaceInsecure("implr-vpn"),
        ],
    },
}