cluster: deploy NixOS-based Ceph

First pass at a non-Rook-managed Ceph cluster. We call it k0 instead of
ceph-waw4, as we are now pretty much sure that we will always have a
one-kube-cluster-to-one-ceph-cluster correspondence, with different Ceph
pools for different media kinds (if at all).

For now this has a single mon and spinning-rust OSDs. This can be iterated
on over time to make it less terrible.

See b/6 for more details.

Change-Id: Ie502a232c700af93f33fcad9fa1c57058161aa11
diff --git a/cluster/nix/defs-cluster-k0.nix b/cluster/nix/defs-cluster-k0.nix
index c3519cc..cd0fcac 100644
--- a/cluster/nix/defs-cluster-k0.nix
+++ b/cluster/nix/defs-cluster-k0.nix
@@ -10,8 +10,60 @@
   fqdn = machineName + domain;
   machine = (builtins.head (builtins.filter (n: n.fqdn == fqdn) machines));
   otherMachines = (builtins.filter (n: n.fqdn != fqdn) machines);
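+  # Map from machine name to machine definition.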
+  machinesByName = builtins.listToAttrs (map (m: { name = m.name; value = m; }) machines);
   inherit machines;
 
+  # Ceph cluster to run systemd units for.
+  cephCluster = {
+    fsid = "74592dc2-31b7-4dbe-88cf-40459dfeb354";
+    name = "k0";
+
+    # Map from node name to mon configuration (currently always empty).
+    #
+    # Each mon also runs a mgr daemon (which is a leader-elected kitchen
+    # sink^W^Whousekeeping service hanging off of a mon cluster).
+    #
+    # Consult the Ceph documentation
+    # (https://docs.ceph.com/en/pacific/rados/operations/add-or-rm-mons/) on
+    # how to actually carry out mon-related maintenance operations.
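+    #
+    # For example, the following standard Ceph CLI commands (covered by the
+    # documentation linked above) are useful when working with mons:
+    #   ceph -s                  # overall cluster status, including mon quorum
+    #   ceph mon stat            # summary of mons and current quorum
+    #   ceph mon remove <name>   # remove a mon from the cluster map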
+    mons = {
+      bc01n02 = {};
+    };
+
+    # Map from node name to list of disks on that node.
+    # Each disk is:
+    #  id:   OSD numerical ID, eg. 0 for osd.0. You get this after running
+    #        ceph-volume lvm create.
+    #  path: Filesystem path of the disk's backing drive. This should be
+    #        something under /dev/disk/by-id for safety. It is only used to
+    #        gate OSD daemon startup on disk presence (see the sketch after
+    #        cephCluster below).
+    #  uuid: OSD uuid/fsid. You get this after running ceph-volume lvm create.
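+    #
+    # To find a drive's stable by-id path, standard tooling can be used, eg.
+    # (sdX being a placeholder for the kernel device name):
+    #   ls -l /dev/disk/by-id/ | grep sdX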
+    #
+    # Quick guide on how to set up a new OSD (but please refer to the Ceph manual):
+    # 0. If this is a new OSD node, copy /var/lib/ceph/bootstrap-osd/k0.keyring
+    #    from another OSD node to it. Remember to chown it ceph:ceph and
+    #    chmod it 0600!
+    # 1. nix-shell -p ceph lvm2 cryptsetup (if on a node that's not yet an OSD)
+    # 2. ceph-volume --cluster k0 lvm create --bluestore --data /dev/sdX --no-systemd --dmcrypt
+    # 3. The above will mount a tmpfs on /var/lib/ceph/osd/k0-X, where X is the
+    #    new OSD ID. The file named fsid inside that directory contains the new
+    #    OSD's fsid/uuid.
+    # 4. Configure osds below with the above information, then redeploy the node
+    #    from Nix.
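+    #
+    # For example, if step 2 created osd.8 (a hypothetical ID), its uuid can be
+    # read with
+    #   cat /var/lib/ceph/osd/k0-8/fsid
+    # and the new entry below would look like (the path here is a placeholder,
+    # use the drive's real by-id path):
+    #   { id = 8; path = "/dev/disk/by-id/..."; uuid = "<contents of the fsid file>"; }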
+    osds = {
+      dcr01s22 = [
+        { id = 0; path = "/dev/disk/by-id/scsi-35000c500850293e3"; uuid = "314034c5-474c-4d0d-ba41-36a881c52560";}
+        { id = 1; path = "/dev/disk/by-id/scsi-35000c500850312cb"; uuid = "a7f1baa0-0fc3-4ab1-9895-67abdc29de03";}
+        { id = 2; path = "/dev/disk/by-id/scsi-35000c5008508e3ef"; uuid = "11ac8316-6a87-48a7-a0c7-74c3cef6c2fa";}
+        { id = 3; path = "/dev/disk/by-id/scsi-35000c5008508e23f"; uuid = "c6b838d1-b08c-4788-936c-293041ed2d4d";}
+      ];
+      dcr01s24 = [
+        { id = 4; path = "/dev/disk/by-id/scsi-35000c5008509199b"; uuid = "a2b4663d-bd8f-49b3-b0b0-195c56ba252f";}
+        { id = 5; path = "/dev/disk/by-id/scsi-35000c50085046abf"; uuid = "a2242989-ccce-4367-8813-519b64b5afdb";}
+        { id = 6; path = "/dev/disk/by-id/scsi-35000c5008502929b"; uuid = "7deac89c-22dd-4c2b-b3cc-43ff7f990fd6";}
+        { id = 7; path = "/dev/disk/by-id/scsi-35000c5008502a323"; uuid = "e305ebb3-9cac-44d2-9f1d-bbb72c8ab51f";}
+      ];
+    };
+  };
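+
+  # A minimal sketch (hypothetical; the actual consumer of cephCluster lives in
+  # the cluster's NixOS modules) of how an OSD's path can gate daemon startup on
+  # disk presence via a plain systemd unit condition:
+  #
+  #   systemd.services."ceph-osd-${toString osd.id}".unitConfig = {
+  #     ConditionPathExists = osd.path;
+  #   };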
+
   pki = rec {
     make = (radix: name: rec {
       ca = ./../certs + "/ca-${radix}.crt";