cluster/nix: locally build nixos derivations

We change the existing behaviour (copy files & run nixos-rebuild switch)
to something closer to nixops-style. This now means that admin machines
used for provisioning need Nix installed locally, but that's probably an
okay choice to make.

The upside of this approach is that it's easier to debug and test
derivations, as all data is local to the repo and the workstation, and
deploying just means copying a configuration closure and switching the
system to it. At some point we should even be able to run the entire
cluster within a set of test VMs.

We also bump the kubernetes control plane to 1.14. Kubelets are still at
1.13 and their upgrade is coming up today too.

Change-Id: Ia9832c47f258ee223d93893d27946d1161cc4bbd
diff --git a/cluster/clustercfg/clustercfg.py b/cluster/clustercfg/clustercfg.py
index e722441..eb9f52d 100644
--- a/cluster/clustercfg/clustercfg.py
+++ b/cluster/clustercfg/clustercfg.py
@@ -26,7 +26,6 @@
 
 
 cluster = 'k0.hswaw.net'
-remote_root = '/opt/hscloud'
 ss = secretstore.SecretStore(
     plain_root=os.path.join(local_root, 'cluster/secrets/plain'),
     cipher_root=os.path.join(local_root, 'cluster/secrets/cipher'))
@@ -41,10 +40,11 @@
 
 
 
-def pki_config(key):
+def pki_config(key, fqdn):
+    machine_name = fqdn.split('.')[0]
     raw = subprocess.check_output([
         'nix', 'eval', '--raw',
-        '( (import ' + local_root + '/cluster/nix/toplevel.nix ).pki.' + key + '.json )',
+        '( ((import ' + local_root + '/cluster/nix/defs-cluster-k0.nix ) "' + machine_name + '").pki.' + key + '.json )',
     ])
     return json.loads(raw)
 
@@ -151,57 +151,45 @@
     r = fabric.Connection('root@{}'.format(fqdn))
 
     if not nocerts:
-        cfg = dict((k, pki_config(k)) for k in [
+        cfg = dict((k, pki_config(k, fqdn)) for k in [
             'etcdPeer', 'etcd.server', 'etcd.kube'
         ])
         certs_root = os.path.join(local_root, 'cluster/certs')
 
         # Make etcd peer certificate for node.
         ca_etcd_peer = ca.CA(ss, certs_root, 'etcdpeer', 'etcd peer ca')
-        ca_etcd_peer.upload(r, cfg['etcdPeer']['ca'])
-        c = ca_etcd_peer.make_cert('etcdpeer-{}'.format(fqdn), hosts=[fqdn], ou='node etcd peer certificate')
-        c.upload_pki(r, cfg['etcdPeer'])
+        ca_etcd_peer.make_cert('etcdpeer-{}'.format(fqdn), hosts=[fqdn], ou='node etcd peer certificate')
 
         # Make etcd server certificate for node and client certificate for kube.
         ca_etcd = ca.CA(ss, certs_root, 'etcd', 'etcd ca')
-        ca_etcd.upload(r, cfg['etcd.server']['ca'])
 
-        c = ca_etcd.make_cert('etcd-{}'.format(fqdn), hosts=[fqdn], ou='node etcd server certificate')
-        c.upload_pki(r, cfg['etcd.server'])
+        ca_etcd.make_cert('etcd-{}'.format(fqdn), hosts=[fqdn], ou='node etcd server certificate')
 
-        c = ca_etcd.make_cert('etcd-kube', hosts=['kube'], ou='kube etcd client certificate')
-        c.upload_pki(r, cfg['etcd.kube'])
+        ca_etcd.make_cert('etcd-kube', hosts=['kube'], ou='kube etcd client certificate')
 
-        # Make root etcd client (do not upload).
         ca_etcd.make_cert('etcd-root', hosts=['root'], ou='root etcd client certificate')
 
-        # Make calico etcd client (do not upload, used by jsonnet).
         ca_etcd.make_cert('etcd-calico', hosts=['calico'], ou='root etcd client certificate')
 
         ## Make kube certificates.
         ca_kube = ca.CA(ss, certs_root, 'kube', 'kubernetes main CA')
 
         # Make prodvider intermediate CA.
-        c = ca_kube.make_cert('ca-kube-prodvider', o='Warsaw Hackerspace', ou='kubernetes prodvider intermediate', hosts=['kubernetes prodvider intermediate CA'], profile='intermediate')
-        c.ensure()
+        ca_kube.make_cert('ca-kube-prodvider', o='Warsaw Hackerspace', ou='kubernetes prodvider intermediate', hosts=['kubernetes prodvider intermediate CA'], profile='intermediate')
 
         # Make kubelet certificate (per node).
-        c = ca_kube.make_cert('kube-kubelet-'+fqdn, o='system:nodes', ou='Kubelet', hosts=['system:node:'+fqdn, fqdn])
-        c.upload_pki(r, pki_config('kube.kubelet'))
+        ca_kube.make_cert('kube-kubelet-'+fqdn, o='system:nodes', ou='Kubelet', hosts=['system:node:'+fqdn, fqdn])
 
         # Make apiserver certificate.
-        c = ca_kube.make_cert('kube-apiserver', ou='Kubernetes API', hosts=[cluster, 'kubernetes.default.svc.'+cluster, '10.10.12.1'])
-        c.upload_pki(r, pki_config('kube.apiserver'), concat_ca=True)
+        ca_kube.make_cert('kube-apiserver', ou='Kubernetes API', hosts=[cluster, 'kubernetes.default.svc.'+cluster, '10.10.12.1'])
 
         # Make service accounts decryption key (as cert for consistency).
-        c = ca_kube.make_cert('kube-serviceaccounts', ou='Kubernetes Service Accounts Signer', hosts=['serviceaccounts'])
-        c.upload_pki(r, pki_config('kube.serviceaccounts'))
+        ca_kube.make_cert('kube-serviceaccounts', ou='Kubernetes Service Accounts Signer', hosts=['serviceaccounts'])
 
         # Make kube component certificates.
         kube_components = ['controllermanager', 'scheduler', 'proxy']
-        cfg = dict((k, pki_config('kube.' + k)) for k in kube_components)
+        cfg = dict((k, pki_config('kube.' + k, fqdn)) for k in kube_components)
         for k in kube_components:
-            ca_kube.upload(r, cfg[k]['ca'])
             # meh 
             if k == 'controllermanager':
                 o = 'system:kube-controller-manager'
@@ -209,24 +197,20 @@
                 o = 'system:kube-'+k
             ou = 'Kubernetes Component '+k
             c = ca_kube.make_cert('kube-'+k, ou=ou, o=o, hosts=[o,])
-            c.upload_pki(r, cfg[k])
 
         ## Make kubefront certificates.
         ca_kubefront = ca.CA(ss, certs_root, 'kubefront', 'kubernetes frontend CA')
-        ca_kubefront.upload(r, pki_config('kubeFront.apiserver')['ca'])
-        c = ca_kubefront.make_cert('kubefront-apiserver', ou='Kubernetes Frontend', hosts=['apiserver'])
-        c.upload_pki(r, pki_config('kubeFront.apiserver'))
+        ca_kubefront.make_cert('kubefront-apiserver', ou='Kubernetes Frontend', hosts=['apiserver'])
 
-    # Upload NixOS config
-    for f in ['toplevel', 'cluster-configuration']:
-        r.put(local=os.path.join(local_root, 'cluster/nix/{}.nix'.format(f)),
-              remote='/etc/nixos/{}.nix'.format(f))
-
-    r.run('nixos-rebuild switch')
+    subprocess.check_call(["nix", "run",
+                           "-f", os.path.join(local_root, "cluster/nix/default.nix"),
+                           "provision",
+                           "-c", "provision-{}".format(fqdn.split('.')[0]),
+                           "switch"])
 
 
 def usage():
-    sys.stderr.write("Usage: clustercfg <nodestrap|admincreds|config>\n")
+    sys.stderr.write("Usage: clustercfg <nodestrap|admincreds>\n")
 
 
 def main():
@@ -241,13 +225,6 @@
         return nodestrap(sys.argv[2:], nocerts=True)
     elif mode == "admincreds":
         return admincreds(sys.argv[2:])
-    elif mode == "config":
-        print('etcd peer:')
-        print(json.dumps(pki_config('etcdPeer'), indent=2))
-        print('etcd server:')
-        print(json.dumps(pki_config('etcd.server'), indent=2))
-        print('etcd client (kube):')
-        print(json.dumps(pki_config('etcd.kube'), indent=2))
     else:
         usage()
         return 1