prod{access,vider}: implement
Prodaccess/Prodvider allow issuing short-lived certificates for all SSO
users to access the kubernetes cluster.
Currently, all users get a personal-$username namespace in which they
have administrative rights. Otherwise, they get no access.
In addition, we define a static CRB to allow some admins access to
everything. In the future, this will be more granular.
We also update relevant documentation.
Change-Id: Ia18594eea8a9e5efbb3e9a25a04a28bbd6a42153
diff --git a/cluster/prodvider/BUILD.bazel b/cluster/prodvider/BUILD.bazel
new file mode 100644
index 0000000..14690b7
--- /dev/null
+++ b/cluster/prodvider/BUILD.bazel
@@ -0,0 +1,64 @@
+# Build rules for prodvider: a gRPC service that issues short-lived
+# certificates allowing SSO users to access the Kubernetes cluster.
+load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push")
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "certs.go",
+ "kubernetes.go",
+ "main.go",
+ "service.go",
+ ],
+ importpath = "code.hackerspace.pl/hscloud/cluster/prodvider",
+ visibility = ["//visibility:private"],
+ deps = [
+ "//cluster/prodvider/proto:go_default_library",
+ "@com_github_cloudflare_cfssl//config:go_default_library",
+ "@com_github_cloudflare_cfssl//csr:go_default_library",
+ "@com_github_cloudflare_cfssl//signer:go_default_library",
+ "@com_github_cloudflare_cfssl//signer/local:go_default_library",
+ "@com_github_golang_glog//:go_default_library",
+ "@in_gopkg_ldap_v3//:go_default_library",
+ "@io_k8s_api//core/v1:go_default_library",
+ "@io_k8s_api//rbac/v1:go_default_library",
+ "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
+ "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
+ "@io_k8s_client_go//kubernetes:go_default_library",
+ "@io_k8s_client_go//rest:go_default_library",
+ "@org_golang_google_grpc//:go_default_library",
+ "@org_golang_google_grpc//codes:go_default_library",
+ "@org_golang_google_grpc//credentials:go_default_library",
+ "@org_golang_google_grpc//status:go_default_library",
+ ],
+)
+
+go_binary(
+ name = "prodvider",
+ embed = [":go_default_library"],
+ visibility = ["//visibility:public"],
+)
+
+# Container layer holding just the prodvider binary.
+container_layer(
+ name = "layer_bin",
+ files = [
+ ":prodvider",
+ ],
+ directory = "/cluster/prodvider/",
+)
+
+# Runtime image: prodvider binary on top of the shared bionic base image.
+container_image(
+ name = "runtime",
+ base = "@prodimage-bionic//image",
+ layers = [
+ ":layer_bin",
+ ],
+)
+
+# Push target; tag is stamped with build timestamp and git commit so every
+# push is uniquely addressable.
+container_push(
+ name = "push",
+ image = ":runtime",
+ format = "Docker",
+ registry = "registry.k0.hswaw.net",
+ repository = "cluster/prodvider",
+ tag = "{BUILD_TIMESTAMP}-{STABLE_GIT_COMMIT}",
+)
diff --git a/cluster/prodvider/certs.go b/cluster/prodvider/certs.go
new file mode 100644
index 0000000..bed0e48
--- /dev/null
+++ b/cluster/prodvider/certs.go
@@ -0,0 +1,112 @@
+package main
+
+import (
+ "crypto/tls"
+ "fmt"
+ "time"
+
+ "github.com/cloudflare/cfssl/csr"
+ "github.com/cloudflare/cfssl/signer"
+ "github.com/golang/glog"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+// selfCreds bootstraps a TLS serving certificate for prodvider itself
+// (CN taken from -prodvider_cn), signed by the intermediate CA, and wraps
+// it into a grpc.ServerOption for the gRPC server. Any failure is fatal,
+// since prodvider cannot serve without TLS.
+func (p *prodvider) selfCreds() grpc.ServerOption {
+ glog.Infof("Bootstrapping certificate for self (%q)...", flagProdviderCN)
+
+ // Create a key and CSR.
+ csrPEM, keyPEM, err := p.makeSelfCSR()
+ if err != nil {
+ glog.Exitf("Could not generate key and CSR for self: %v", err)
+ }
+
+ // Sign the CSR with the intermediate CA ("server" profile).
+ certPEM, err := p.makeSelfCertificate(csrPEM)
+ if err != nil {
+ glog.Exitf("Could not sign certificate for self: %v", err)
+ }
+
+ serverCert, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ glog.Exitf("Could not use gRPC certificate: %v", err)
+ }
+
+ // Append the intermediate CA certificate so clients receive the full
+ // chain up to the root.
+ // NOTE(review): the error from p.sign.Certificate is discarded here and
+ // elsewhere — presumably safe since the signer was loaded from file at
+ // startup, but worth confirming.
+ signerCert, _ := p.sign.Certificate("", "")
+ serverCert.Certificate = append(serverCert.Certificate, signerCert.Raw)
+
+ return grpc.Creds(credentials.NewTLS(&tls.Config{
+ Certificates: []tls.Certificate{serverCert},
+ }))
+}
+
+// makeSelfCSR generates a 4096-bit RSA key and a CSR for prodvider's own
+// serving certificate. The subject fields (C/ST/L/O/OU) are copied from the
+// signing CA's certificate so the issued cert matches the CA's naming.
+// Returns (csrPEM, keyPEM, error).
+func (p *prodvider) makeSelfCSR() ([]byte, []byte, error) {
+ signerCert, _ := p.sign.Certificate("", "")
+ req := &csr.CertificateRequest{
+ CN: flagProdviderCN,
+ KeyRequest: &csr.BasicKeyRequest{
+ A: "rsa",
+ S: 4096,
+ },
+ // NOTE(review): indexing [0] panics if the CA certificate subject is
+ // missing any of these fields — assumes the CA is always fully
+ // populated; confirm against the CA generation tooling.
+ Names: []csr.Name{
+ {
+ C: signerCert.Subject.Country[0],
+ ST: signerCert.Subject.Province[0],
+ L: signerCert.Subject.Locality[0],
+ O: signerCert.Subject.Organization[0],
+ OU: signerCert.Subject.OrganizationalUnit[0],
+ },
+ },
+ }
+
+ // No extra validation of the request — we construct it fully ourselves.
+ g := &csr.Generator{
+ Validator: func(req *csr.CertificateRequest) error { return nil },
+ }
+
+ return g.ProcessRequest(req)
+}
+
+// makeSelfCertificate signs the given CSR (PEM) with the intermediate CA
+// using the "server" signing profile and returns the certificate PEM.
+func (p *prodvider) makeSelfCertificate(csr []byte) ([]byte, error) {
+ req := signer.SignRequest{
+ Hosts: []string{},
+ Request: string(csr),
+ Profile: "server",
+ }
+ return p.sign.Sign(req)
+}
+
+// makeKubernetesCSR generates a 4096-bit RSA key and CSR for a Kubernetes
+// client certificate. Kubernetes maps the certificate CN to the user name
+// and O to the user's group, so `username` and `o` carry the identity.
+// C/ST/L are copied from the signing CA's subject. Returns
+// (csrPEM, keyPEM, error).
+func (p *prodvider) makeKubernetesCSR(username, o string) ([]byte, []byte, error) {
+ signerCert, _ := p.sign.Certificate("", "")
+ req := &csr.CertificateRequest{
+ CN: username,
+ KeyRequest: &csr.BasicKeyRequest{
+ A: "rsa",
+ S: 4096,
+ },
+ Names: []csr.Name{
+ {
+ C: signerCert.Subject.Country[0],
+ ST: signerCert.Subject.Province[0],
+ L: signerCert.Subject.Locality[0],
+ O: o,
+ OU: fmt.Sprintf("Prodvider Kubernetes Cert for %s/%s", username, o),
+ },
+ },
+ }
+
+ // No extra validation of the request — we construct it fully ourselves.
+ g := &csr.Generator{
+ Validator: func(req *csr.CertificateRequest) error { return nil },
+ }
+
+ return g.ProcessRequest(req)
+}
+
+// makeKubernetesCertificate signs the given CSR (PEM) with the intermediate
+// CA using the "client" signing profile, with an explicit expiry time. Used
+// for short-lived user certificates and prodvider's own apiserver client
+// certificate.
+func (p *prodvider) makeKubernetesCertificate(csr []byte, notAfter time.Time) ([]byte, error) {
+ req := signer.SignRequest{
+ Hosts: []string{},
+ Request: string(csr),
+ Profile: "client",
+ NotAfter: notAfter,
+ }
+ return p.sign.Sign(req)
+}
diff --git a/cluster/prodvider/kubernetes.go b/cluster/prodvider/kubernetes.go
new file mode 100644
index 0000000..3386625
--- /dev/null
+++ b/cluster/prodvider/kubernetes.go
@@ -0,0 +1,205 @@
+package main
+
+import (
+ "encoding/pem"
+ "fmt"
+ "time"
+
+ "github.com/golang/glog"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+
+ pb "code.hackerspace.pl/hscloud/cluster/prodvider/proto"
+)
+
+// kubernetesCreds generates short-lived (13 hour) Kubernetes client
+// credentials for an SSO user. The certificate identifies the user as
+// <username>@hackerspace.pl in group sso:<username>, and the returned keys
+// bundle includes the apiserver CA for server verification.
+func (p *prodvider) kubernetesCreds(username string) (*pb.KubernetesKeys, error) {
+ o := fmt.Sprintf("sso:%s", username)
+
+ csrPEM, keyPEM, err := p.makeKubernetesCSR(username+"@hackerspace.pl", o)
+ if err != nil {
+ return nil, err
+ }
+
+ // 13 hours: long enough for a work day, short enough to limit exposure.
+ certPEM, err := p.makeKubernetesCertificate(csrPEM, time.Now().Add(13*time.Hour))
+ if err != nil {
+ return nil, err
+ }
+
+ caCert, _ := p.sign.Certificate("", "")
+ caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw})
+
+ // Build certificate chain from new cert and intermediate CA.
+ chainPEM := append(certPEM, caPEM...)
+
+ glog.Infof("Generated k8s certificate for %q", username)
+ return &pb.KubernetesKeys{
+ Cluster: "k0.hswaw.net",
+ // APIServerCA
+ Ca: p.kubeCAPEM,
+ // Chain of new cert + intermediate CA
+ Cert: chainPEM,
+ Key: keyPEM,
+ }, nil
+}
+
+// kubernetesConnect issues prodvider a 30-day system:masters client
+// certificate and uses it to build a Kubernetes clientset, stored in p.k8s.
+// The 30-day validity is longer than the timebomb restart interval, so the
+// certificate is always refreshed before it can expire.
+func (p *prodvider) kubernetesConnect() error {
+ csrPEM, keyPEM, err := p.makeKubernetesCSR("prodvider", "system:masters")
+ if err != nil {
+ return err
+ }
+
+ certPEM, err := p.makeKubernetesCertificate(csrPEM, time.Now().Add(30*24*time.Hour))
+ if err != nil {
+ return err
+ }
+
+ caCert, _ := p.sign.Certificate("", "")
+
+ caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw})
+
+ glog.Infof("Generated k8s certificate for self (system:masters)")
+
+ // Build certificate chain from our cert and intermediate CA.
+ chainPEM := append(certPEM, caPEM...)
+
+ config := &rest.Config{
+ Host: flagKubernetesHost,
+ TLSClientConfig: rest.TLSClientConfig{
+ // Chain to authenticate ourselves (us + intermediate CA).
+ CertData: chainPEM,
+ KeyData: keyPEM,
+ // APIServer CA for verification.
+ CAData: p.kubeCAPEM,
+ },
+ }
+
+ cs, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ return err
+ }
+
+ p.k8s = cs
+
+ return nil
+}
+
+// kubernetesSetupUser ensures that for a given SSO username we:
+// - have a personal-<username> namespace
+// - have a sso:<username>:personal rolebinding that binds
+// system:admin-namespace to the user within their personal namespace
+// - have a sso:<username>:global clusterrolebinding that binds
+// system:viewer to the user at cluster level
+// All three steps are idempotent; this runs on every Authenticate call.
+func (p *prodvider) kubernetesSetupUser(username string) error {
+ namespace := "personal-" + username
+ if err := p.ensureNamespace(namespace); err != nil {
+ return err
+ }
+ if err := p.ensureRoleBindingPersonal(namespace, username); err != nil {
+ return err
+ }
+ if err := p.ensureClusterRoleBindingGlobal(username); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ensureNamespace creates the named namespace if it does not already exist.
+// An existing namespace is left untouched.
+func (p *prodvider) ensureNamespace(name string) error {
+ _, err := p.k8s.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
+ switch {
+ case err == nil:
+ // Already exists, nothing to do
+ return nil
+ case errors.IsNotFound(err):
+ break
+ default:
+ // Something went wrong.
+ return err
+ }
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+ _, err = p.k8s.CoreV1().Namespaces().Create(ns)
+ return err
+}
+
+// ensureRoleBindingPersonal creates or updates the sso:<username>:personal
+// RoleBinding in the user's personal namespace, granting the
+// <username>@hackerspace.pl user the system:admin-namespace ClusterRole
+// within that namespace only. Updating (not just creating) ensures any
+// manual drift is reverted on next authentication.
+func (p *prodvider) ensureRoleBindingPersonal(namespace, username string) error {
+ name := "sso:" + username + ":personal"
+ rb := &rbacv1.RoleBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "User",
+ Name: username + "@hackerspace.pl",
+ },
+ },
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "ClusterRole",
+ Name: "system:admin-namespace",
+ },
+ }
+
+ rbs := p.k8s.RbacV1().RoleBindings(namespace)
+ _, err := rbs.Get(name, metav1.GetOptions{})
+ switch {
+ case err == nil:
+ // Already exists, update.
+ _, err = rbs.Update(rb)
+ return err
+ case errors.IsNotFound(err):
+ // Create.
+ _, err = rbs.Create(rb)
+ return err
+ default:
+ // Something went wrong.
+ return err
+ }
+}
+
+// ensureClusterRoleBindingGlobal creates or updates the sso:<username>:global
+// ClusterRoleBinding, granting the <username>@hackerspace.pl user the
+// system:viewer ClusterRole across the whole cluster. Like the personal
+// RoleBinding, it is force-updated on every authentication.
+func (p *prodvider) ensureClusterRoleBindingGlobal(username string) error {
+ name := "sso:" + username + ":global"
+ rb := &rbacv1.ClusterRoleBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "User",
+ Name: username + "@hackerspace.pl",
+ },
+ },
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "ClusterRole",
+ Name: "system:viewer",
+ },
+ }
+
+ crbs := p.k8s.RbacV1().ClusterRoleBindings()
+ _, err := crbs.Get(name, metav1.GetOptions{})
+ switch {
+ case err == nil:
+ // Already exists, update.
+ _, err = crbs.Update(rb)
+ return err
+ case errors.IsNotFound(err):
+ // Create.
+ _, err = crbs.Create(rb)
+ return err
+ default:
+ // Something went wrong.
+ return err
+ }
+}
diff --git a/cluster/prodvider/main.go b/cluster/prodvider/main.go
new file mode 100644
index 0000000..7222a86
--- /dev/null
+++ b/cluster/prodvider/main.go
@@ -0,0 +1,149 @@
+package main
+
+import (
+ "flag"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "os"
+ "time"
+
+ "github.com/cloudflare/cfssl/config"
+ "github.com/cloudflare/cfssl/signer/local"
+ "github.com/golang/glog"
+ "google.golang.org/grpc"
+ "k8s.io/client-go/kubernetes"
+
+ pb "code.hackerspace.pl/hscloud/cluster/prodvider/proto"
+)
+
+var (
+ flagLDAPServer string
+ flagLDAPBindDN string
+ flagLDAPGroupSearchBase string
+ flagListenAddress string
+ flagKubernetesHost string
+
+ flagCACertificatePath string
+ flagCAKeyPath string
+ flagKubeCACertificatePath string
+
+ flagProdviderCN string
+)
+
+func init() {
+ flag.Set("logtostderr", "true")
+}
+
+type prodvider struct {
+ sign *local.Signer
+ k8s *kubernetes.Clientset
+ srv *grpc.Server
+ kubeCAPEM []byte
+}
+
+// newProdvider builds a prodvider instance: it loads the intermediate CA
+// cert/key into a cfssl signer (with server/client/client-server profiles,
+// all expiring in 30 days by default) and reads the Kubernetes apiserver CA
+// certificate. Any failure is fatal — prodvider cannot run without either.
+func newProdvider() *prodvider {
+ policy := &config.Signing{
+ Profiles: map[string]*config.SigningProfile{
+ "server": &config.SigningProfile{
+ Usage: []string{"signing", "key encipherment", "server auth"},
+ ExpiryString: "30d",
+ },
+ "client": &config.SigningProfile{
+ Usage: []string{"signing", "key encipherment", "client auth"},
+ ExpiryString: "30d",
+ },
+ "client-server": &config.SigningProfile{
+ Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
+ ExpiryString: "30d",
+ },
+ },
+ Default: config.DefaultConfig(),
+ }
+
+ sign, err := local.NewSignerFromFile(flagCACertificatePath, flagCAKeyPath, policy)
+ if err != nil {
+ glog.Exitf("Could not create signer: %v", err)
+ }
+
+ kubeCAPEM, err := ioutil.ReadFile(flagKubeCACertificatePath)
+ if err != nil {
+ // Pass err — previously the %v verb had no argument, losing the error.
+ glog.Exitf("Could not read kube CA cert path: %v", err)
+ }
+
+ return &prodvider{
+ sign: sign,
+ kubeCAPEM: kubeCAPEM,
+ }
+}
+
+// Timebomb restarts the prodvider after a deadline, usually 7 days +/- 4 days.
+// This is to ensure we serve with up-to-date certificates and that the service
+// can still come up after restart.
+// Concretely: base deadline of 3 days plus a uniform random jitter of up to
+// 8 days, i.e. 3-11 days, centered on ~7. The deadline is polled once a
+// minute; after it passes, connections are drained gracefully for one more
+// tick before the process exits (and is restarted by its supervisor).
+func timebomb(srv *grpc.Server) {
+ deadline := time.Now()
+ deadline = deadline.Add(3 * 24 * time.Hour)
+ rand.Seed(time.Now().UnixNano())
+ // Jitter in seconds, up to 8 days.
+ jitter := rand.Intn(8 * 24 * 60 * 60)
+ deadline = deadline.Add(time.Duration(jitter) * time.Second)
+
+ glog.Infof("Timebomb deadline set to %v", deadline)
+
+ t := time.NewTicker(time.Minute)
+ for {
+ <-t.C
+ if time.Now().After(deadline) {
+ break
+ }
+ }
+
+ // Start killing connections, and wait one minute...
+ go srv.GracefulStop()
+ <-t.C
+ glog.Infof("Timebomb deadline exceeded, restarting.")
+ os.Exit(0)
+}
+
+// main parses flags, connects to Kubernetes with a freshly-issued
+// system:masters certificate, bootstraps its own serving certificate, and
+// serves the Prodvider gRPC service until the timebomb restarts the process.
+func main() {
+ flag.StringVar(&flagLDAPServer, "ldap_server", "ldap.hackerspace.pl:636", "Address of LDAP server")
+ flag.StringVar(&flagLDAPBindDN, "ldap_bind_dn", "uid=%s,ou=People,dc=hackerspace,dc=pl", "LDAP Bind DN")
+ flag.StringVar(&flagLDAPGroupSearchBase, "ldap_group_search_base_dn", "ou=Group,dc=hackerspace,dc=pl", "LDAP Group Search Base DN")
+ flag.StringVar(&flagListenAddress, "listen_address", "127.0.0.1:8080", "gRPC listen address")
+ flag.StringVar(&flagKubernetesHost, "kubernetes_host", "k0.hswaw.net:4001", "Kubernetes API host")
+
+ flag.StringVar(&flagCACertificatePath, "ca_certificate_path", "", "CA certificate path (for signer)")
+ flag.StringVar(&flagCAKeyPath, "ca_key_path", "", "CA key path (for signer)")
+ flag.StringVar(&flagKubeCACertificatePath, "kube_ca_certificate_path", "", "CA certificate path (for checking kube apiserver)")
+
+ flag.StringVar(&flagProdviderCN, "prodvider_cn", "prodvider.hswaw.net", "CN of certificate that prodvider will use")
+ flag.Parse()
+
+ // NOTE(review): -kube_ca_certificate_path is not validated here; an empty
+ // value is only caught later by the ReadFile in newProdvider. Consider
+ // checking it alongside the CA flags.
+ if flagCACertificatePath == "" || flagCAKeyPath == "" {
+ glog.Exitf("CA certificate and key must be provided")
+ }
+
+ p := newProdvider()
+ err := p.kubernetesConnect()
+ if err != nil {
+ glog.Exitf("Could not connect to kubernetes: %v", err)
+ }
+ creds := p.selfCreds()
+
+ // Start serving gRPC
+ grpcLis, err := net.Listen("tcp", flagListenAddress)
+ if err != nil {
+ glog.Exitf("Could not listen for gRPC on %q: %v", flagListenAddress, err)
+ }
+
+ glog.Infof("Starting gRPC on %q...", flagListenAddress)
+ grpcSrv := grpc.NewServer(creds)
+
+ pb.RegisterProdviderServer(grpcSrv, p)
+
+ // Restart periodically to refresh certificates; see timebomb().
+ go timebomb(grpcSrv)
+
+ err = grpcSrv.Serve(grpcLis)
+ if err != nil {
+ glog.Exitf("Could not serve gRPC: %v", err)
+ }
+}
diff --git a/cluster/prodvider/proto/BUILD.bazel b/cluster/prodvider/proto/BUILD.bazel
new file mode 100644
index 0000000..2efd457
--- /dev/null
+++ b/cluster/prodvider/proto/BUILD.bazel
@@ -0,0 +1,23 @@
+# Build rules for the Prodvider gRPC/protobuf definitions.
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+
+proto_library(
+ name = "proto_proto",
+ srcs = ["prodvider.proto"],
+ visibility = ["//visibility:public"],
+)
+
+# Generated Go stubs, compiled with the gRPC-enabled compiler.
+go_proto_library(
+ name = "proto_go_proto",
+ compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+ importpath = "code.hackerspace.pl/hscloud/cluster/prodvider/proto",
+ proto = ":proto_proto",
+ visibility = ["//visibility:public"],
+)
+
+go_library(
+ name = "go_default_library",
+ embed = [":proto_go_proto"],
+ importpath = "code.hackerspace.pl/hscloud/cluster/prodvider/proto",
+ visibility = ["//visibility:public"],
+)
diff --git a/cluster/prodvider/proto/prodvider.proto b/cluster/prodvider/proto/prodvider.proto
new file mode 100644
index 0000000..1ae2798
--- /dev/null
+++ b/cluster/prodvider/proto/prodvider.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+package prodvider;
+option go_package = "code.hackerspace.pl/hscloud/cluster/prodvider/proto";
+
+// SSO credentials of the user requesting certificates.
+message AuthenticateRequest {
+ string username = 1;
+ string password = 2;
+}
+
+message AuthenticateResponse {
+ enum Result {
+ RESULT_INVALID = 0;
+ // Authentication succeeded; kubernetes_keys is populated.
+ RESULT_AUTHENTICATED = 1;
+ // Bad username/password; kubernetes_keys is empty.
+ RESULT_INVALID_CREDENTIALS = 2;
+ }
+ Result result = 1;
+ KubernetesKeys kubernetes_keys = 2;
+}
+
+// Kubernetes client credentials: all PEM-encoded.
+message KubernetesKeys {
+ // Cluster name, eg. k0.hswaw.net.
+ string cluster = 1;
+ // CA certificate of the apiserver.
+ bytes ca = 2;
+ // Client certificate chain issued to the user.
+ bytes cert = 3;
+ // Private key matching cert.
+ bytes key = 4;
+}
+
+// Prodvider issues short-lived Kubernetes credentials to SSO users.
+service Prodvider {
+ rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse);
+}
diff --git a/cluster/prodvider/service.go b/cluster/prodvider/service.go
new file mode 100644
index 0000000..5635ac2
--- /dev/null
+++ b/cluster/prodvider/service.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/golang/glog"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ ldap "gopkg.in/ldap.v3"
+
+ pb "code.hackerspace.pl/hscloud/cluster/prodvider/proto"
+)
+
+var (
+ reUsername = regexp.MustCompile(`^[a-zA-Z0-9_\.]+$`)
+)
+
+// Authenticate implements the Prodvider gRPC service: it verifies the given
+// SSO username/password against LDAP, requires membership of the staff or
+// kubernetes-users group, ensures the user's Kubernetes objects exist
+// (namespace, role bindings) and returns short-lived Kubernetes credentials.
+func (p *prodvider) Authenticate(ctx context.Context, req *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ username := strings.TrimSpace(req.Username)
+ if username == "" || !reUsername.MatchString(username) {
+ return nil, status.Error(codes.InvalidArgument, "invalid username")
+ }
+
+ // Reject empty passwords outright — some LDAP servers treat a bind with
+ // an empty password as a successful unauthenticated bind.
+ password := req.Password
+ if password == "" {
+ return &pb.AuthenticateResponse{
+ Result: pb.AuthenticateResponse_RESULT_INVALID_CREDENTIALS,
+ }, nil
+ }
+
+ tlsConfig := &tls.Config{}
+ lconn, err := ldap.DialTLS("tcp", flagLDAPServer, tlsConfig)
+ if err != nil {
+ glog.Errorf("ldap.DialTLS: %v", err)
+ // Fixed typo in user-visible message ("context" -> "contact").
+ return nil, status.Error(codes.Unavailable, "could not contact LDAP")
+ }
+ // Close the LDAP connection on all return paths — it was previously
+ // leaked on every request.
+ defer lconn.Close()
+
+ // Bind as the user to verify the password.
+ dn := fmt.Sprintf(flagLDAPBindDN, username)
+ err = lconn.Bind(dn, password)
+
+ if err != nil {
+ if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
+ return &pb.AuthenticateResponse{
+ Result: pb.AuthenticateResponse_RESULT_INVALID_CREDENTIALS,
+ }, nil
+ }
+
+ glog.Errorf("ldap.Bind: %v", err)
+ return nil, status.Error(codes.Unavailable, "could not query LDAP")
+ }
+
+ groups, err := p.groupMemberships(lconn, username)
+ if err != nil {
+ return nil, err
+ }
+
+ if !groups["kubernetes-users"] && !groups["staff"] {
+ return nil, status.Error(codes.PermissionDenied, "not part of staff or kubernetes-users")
+ }
+
+ // Idempotently create/refresh the user's namespace and RBAC bindings.
+ err = p.kubernetesSetupUser(username)
+ if err != nil {
+ glog.Errorf("kubernetesSetupUser(%v): %v", username, err)
+ return nil, status.Error(codes.Unavailable, "could not set up objects in Kubernetes")
+ }
+
+ keys, err := p.kubernetesCreds(username)
+ if err != nil {
+ glog.Errorf("kubernetesCreds(%q): %v", username, err)
+ return nil, status.Error(codes.Unavailable, "could not generate k8s keys")
+ }
+ return &pb.AuthenticateResponse{
+ Result: pb.AuthenticateResponse_RESULT_AUTHENTICATED,
+ KubernetesKeys: keys,
+ }, nil
+}
+
+// groupMemberships returns the set of LDAP group CNs (as a cn -> true map)
+// that list the given user's DN as a uniqueMember, searching under the
+// configured group search base.
+// NOTE(review): username is interpolated into the LDAP filter without
+// escaping — safe only because Authenticate pre-validates it against
+// reUsername; keep that invariant if this gains other callers.
+func (p *prodvider) groupMemberships(lconn *ldap.Conn, username string) (map[string]bool, error) {
+ searchRequest := ldap.NewSearchRequest(
+ flagLDAPGroupSearchBase,
+ ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+ fmt.Sprintf("(uniqueMember=%s)", fmt.Sprintf(flagLDAPBindDN, username)),
+ []string{"dn", "cn"},
+ nil,
+ )
+
+ sr, err := lconn.Search(searchRequest)
+ if err != nil {
+ glog.Errorf("ldap.Search: %v", err)
+ return nil, status.Error(codes.Unavailable, "could not query LDAP for group")
+ }
+
+ res := make(map[string]bool)
+ for _, entry := range sr.Entries {
+ cn := entry.GetAttributeValue("cn")
+ res[cn] = true
+ }
+
+ return res, nil
+}