cluster/kube: ceph-waw1 up
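
rook.libsonnet is now split in two: Operator, which carries the CRDs,
RBAC and the rook-ceph-operator deployment, and Cluster(operator, name),
which stamps out a Ceph cluster in its own namespace (service accounts,
roles and bindings, the CephCluster resource, and a dashboard Service
with an Ingress at <name>.hswaw.net).

The first such cluster, ceph-waw1, runs three mons and one OSD on
/dev/sda of each of bc01n01, bc01n02 and bc01n03. Along the way, the
operator's ClusterRole gains batch/jobs permissions, the CephNFS CRD
gets its irregular plural set explicitly, and operator logging is
bumped to DEBUG.

Clusters are instantiated explicitly from cluster.jsonnet, along the
lines of the ceph-waw1 definition below:

    cephWaw1: rook.Cluster(cluster.rook, "ceph-waw1") {
        spec: { /* mons, storage nodes and devices, ... */ },
    },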
diff --git a/cluster/kube/cluster.jsonnet b/cluster/kube/cluster.jsonnet
index 5275f66..e308fdd 100644
--- a/cluster/kube/cluster.jsonnet
+++ b/cluster/kube/cluster.jsonnet
@@ -61,8 +61,43 @@
},
// Main nginx Ingress Controller
nginx: nginx.Environment {},
+
// Rook Ceph storage
- rook: rook.Environment {},
+ rook: rook.Operator {},
+ // waw1 ceph cluster
+ cephWaw1: rook.Cluster(cluster.rook, "ceph-waw1") {
+ spec: {
+ mon: {
+ count: 3,
+ allowMultiplePerNode: false,
+ },
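+ // Enumerate storage nodes and devices explicitly instead of relying on auto-discovery.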
+ storage: {
+ useAllNodes: false,
+ useAllDevices: false,
+ config: {
+ databaseSizeMB: "1024",
+ journalSizeMB: "1024",
+ },
+ nodes: [
+ {
+ name: "bc01n01.hswaw.net",
+ location: "rack=dcr01 chassis=bc01 host=bc01n01",
+ devices: [ { name: "sda" } ],
+ },
+ {
+ name: "bc01n02.hswaw.net",
+ location: "rack=dcr01 chassis=bc01 host=bc01n02",
+ devices: [ { name: "sda" } ],
+ },
+ {
+ name: "bc01n03.hswaw.net",
+ location: "rack=dcr01 chassis=bc01 host=bc01n03",
+ devices: [ { name: "sda" } ],
+ },
+ ],
+ },
+ },
+ },
};
diff --git a/cluster/kube/lib/rook.libsonnet b/cluster/kube/lib/rook.libsonnet
index 13e3f56..71b6a55 100644
--- a/cluster/kube/lib/rook.libsonnet
+++ b/cluster/kube/lib/rook.libsonnet
@@ -3,7 +3,7 @@
local kube = import "../../../kube/kube.libsonnet";
{
- Environment: {
+ Operator: {
local env = self,
local cfg = env.cfg,
cfg:: {
@@ -127,6 +127,7 @@
cephnfses: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephNFS") {
spec+: {
names+: {
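+ // "cephnfses" is an irregular plural and has to be set explicitly.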
+ plural: "cephnfses",
shortNames: ["nfs"],
},
},
@@ -149,9 +150,7 @@
crs: {
clusterMgmt: kube.ClusterRole("rook-ceph-cluster-mgmt") {
- metadata+: env.metadata {
- namespace:: null,
- },
+ metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
@@ -166,9 +165,7 @@
],
},
global: kube.ClusterRole("rook-ceph-global") {
- metadata+: env.metadata {
- namespace:: null,
- },
+ metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
@@ -186,6 +183,11 @@
verbs: ["get", "list", "watch", "create", "update", "delete"],
},
{
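+ // Allow the operator to manage batch Jobs.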
+ apiGroups: ["batch"],
+ resources: ["jobs"],
+ verbs: ["get", "list", "watch", "create", "update", "delete"],
+ },
+ {
apiGroups: ["ceph.rook.io"],
resources: ["*"],
verbs: ["*"],
@@ -198,9 +200,7 @@
],
},
mgrCluster: kube.ClusterRole("rook-ceph-mgr-cluster") {
- metadata+: env.metadata {
- namespace:: null,
- },
+ metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
@@ -212,7 +212,7 @@
},
crb: kube.ClusterRoleBinding("ceph-rook-global") {
- metadata+: env.metadata,
+ metadata+: env.metadata { namespace:: null },
roleRef: {
apiGroup: "rbac.authorization.k8s.io",
kind: "ClusterRole",
@@ -276,7 +276,7 @@
env_: {
LIB_MODULES_DIR_PATH: "/run/current-system/kernel-modules/lib/modules/",
ROOK_ALLOW_MULTIPLE_FILESYSTEMS: "false",
- ROOK_LOG_LEVEL: "info",
+ ROOK_LOG_LEVEL: "DEBUG",
ROOK_MON_HEALTHCHECK_INTERVAL: "45s",
ROOK_MON_OUT_TIMEOUT: "600s",
ROOK_DISCOVER_DEVICES_INTERVAL: "60m",
@@ -298,4 +298,160 @@
},
},
},
+
+ // Create a new Ceph cluster, in a namespace of the same name, managed by the given Rook operator.
+ Cluster(operator, name):: {
+ local cluster = self,
+ spec:: error "please define cluster spec",
+
+ metadata:: {
+ namespace: name,
+ },
+
+ name(suffix):: cluster.metadata.namespace + "-" + suffix,
+
+ namespace: kube.Namespace(cluster.metadata.namespace),
+
+ sa: {
+ // These service account names are hardcoded in the operator source.
+ osd: kube.ServiceAccount("rook-ceph-osd") {
+ metadata+: cluster.metadata,
+ },
+ mgr: kube.ServiceAccount("rook-ceph-mgr") {
+ metadata+: cluster.metadata,
+ },
+ },
+
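+ // Namespaced roles for the cluster's own service accounts. mgrSystem is
+ // a ClusterRole so that it can be bound in the operator's namespace via
+ // mgrSystemRB below.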
+ roles: {
+ osd: kube.Role(cluster.name("osd")) {
+ metadata+: cluster.metadata,
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["configmaps"],
+ verbs: ["get", "list", "watch", "create", "update", "delete"],
+ }
+ ],
+ },
+ mgr: kube.Role(cluster.name("mgr")) {
+ metadata+: cluster.metadata,
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["pods", "services"],
+ verbs: ["get", "list", "watch"],
+ },
+ {
+ apiGroups: ["batch"],
+ resources: ["jobs"],
+ verbs: ["get", "list", "watch", "create", "update", "delete"],
+ },
+ {
+ apiGroups: ["ceph.rook.io"],
+ resources: ["*"],
+ verbs: ["*"],
+ },
+ ],
+ },
+ mgrSystem: kube.ClusterRole(cluster.name("mgr-system")) {
+ metadata+: cluster.metadata { namespace:: null },
+ rules: [
+ {
+ apiGroups: [""],
+ resources: ["configmaps"],
+ verbs: ["get", "list", "watch"],
+ }
+ ],
+ },
+ },
+
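+ // Bind the operator- and cluster-level roles to their respective
+ // service accounts within this namespace.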
+ rbs: [
+ kube.RoleBinding(cluster.name(el.name)) {
+ metadata+: cluster.metadata,
+ roleRef: {
+ apiGroup: "rbac.authorization.k8s.io",
+ kind: el.role.kind,
+ name: el.role.metadata.name,
+ },
+ subjects: [
+ {
+ kind: el.sa.kind,
+ name: el.sa.metadata.name,
+ namespace: el.sa.metadata.namespace,
+ },
+ ],
+ },
+ for el in [
+ // Allow Operator SA to perform Cluster Mgmt in this namespace.
+ { name: "cluster-mgmt", role: operator.crs.clusterMgmt, sa: operator.sa },
+ { name: "osd", role: cluster.roles.osd, sa: cluster.sa.osd },
+ { name: "mgr", role: cluster.roles.mgr, sa: cluster.sa.mgr },
+ { name: "mgr-cluster", role: operator.crs.mgrCluster, sa: cluster.sa.mgr },
+ ]
+ ],
+
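+ // The mgr also needs to read configmaps in the operator's namespace, so
+ // this binding is created there rather than in the cluster's namespace.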
+ mgrSystemRB: kube.RoleBinding(cluster.name("mgr-system")) {
+ metadata+: {
+ namespace: operator.cfg.namespace,
+ },
+ roleRef: {
+ apiGroup: "rbac.authorization.k8s.io",
+ kind: cluster.roles.mgrSystem.kind,
+ name: cluster.roles.mgrSystem.metadata.name,
+ },
+ subjects: [
+ {
+ kind: cluster.sa.mgr.kind,
+ name: cluster.sa.mgr.metadata.name,
+ namespace: cluster.sa.mgr.metadata.namespace,
+ },
+ ],
+ },
+
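+ // The CephCluster resource itself. The caller-provided spec is merged
+ // over these defaults.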
+ cluster: kube._Object("ceph.rook.io/v1", "CephCluster", name) {
+ metadata+: cluster.metadata,
+ spec: {
+ cephVersion: {
+ image: "ceph/ceph:v13.2.5-20190319",
+ },
+ dataDirHostPath: "/var/lib/rook",
+ dashboard: {
+ ssl: false,
+ enabled: true,
+ port: 8080,
+ },
+ } + cluster.spec,
+ },
+
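+ // Expose the Ceph dashboard (served by the active mgr on port 8080)...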
+ dashboardService: kube.Service(cluster.name("dashboard")) {
+ metadata+: cluster.metadata,
+ spec: {
+ ports: [
+ { name: "dashboard", port: 80, targetPort: 8080, protocol: "TCP" },
+ ],
+ selector: {
+ app: "rook-ceph-mgr",
+ rook_cluster: name,
+ },
+ type: "ClusterIP",
+ },
+ },
+
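+ // ...and route <name>.hswaw.net to it through the Ingress controller.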
+ dashboardIngress: kube.Ingress(cluster.name("dashboard")) {
+ metadata+: cluster.metadata,
+ spec+: {
+ rules: [
+ {
+ host: "%s.hswaw.net" % name,
+ http: {
+ paths: [
+ { path: "/", backend: cluster.dashboardService.name_port },
+ ]
+ },
+ }
+ ],
+ },
+ },
+ },
}