lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 1 | package main |
| 2 | |
| 3 | import ( |
| 4 | "context" |
| 5 | "flag" |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 6 | "io" |
| 7 | "time" |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 8 | |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 9 | "code.hackerspace.pl/hscloud/bgpwtf/cccampix/pgpencryptor/gpg" |
| 10 | "code.hackerspace.pl/hscloud/bgpwtf/cccampix/pgpencryptor/hkp" |
| 11 | "code.hackerspace.pl/hscloud/bgpwtf/cccampix/pgpencryptor/model" |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 12 | pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto" |
| 13 | "code.hackerspace.pl/hscloud/go/mirko" |
Serge Bazanski | 187c4bb | 2019-08-14 18:50:16 +0200 | [diff] [blame] | 14 | "github.com/golang/glog" |
| 15 | "github.com/lib/pq" |
| 16 | "google.golang.org/grpc/codes" |
| 17 | "google.golang.org/grpc/status" |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 18 | ) |
| 19 | |
var (
	// flagMaxClients bounds the number of concurrent Encrypt sessions
	// (enforced via the service.clients semaphore channel).
	flagMaxClients int
	// flagChunkSize is the maximum size, in bytes, of a ciphertext chunk
	// sent back to the client per stream message.
	flagChunkSize int
	// flagHkpMaxWaitTime is the maximum time to await a reply from HKP;
	// it also bounds the key lookup performed at the start of Encrypt.
	flagHkpMaxWaitTime time.Duration
	// flagDSN is the PostgreSQL connection string used by the model.
	flagDSN string
)
| 26 | |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 27 | type service struct { |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 28 | hkpClient hkp.Client |
| 29 | encryptorFactory gpg.EncryptorFactory |
| 30 | clients chan (struct{}) |
| 31 | model model.Model |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 32 | } |
| 33 | |
| 34 | func (s *service) KeyInfo(ctx context.Context, req *pb.KeyInfoRequest) (*pb.KeyInfoResponse, error) { |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 35 | var data []byte |
| 36 | var err error |
| 37 | |
| 38 | switch req.Caching { |
| 39 | case pb.KeyInfoRequest_CACHING_AUTO: |
| 40 | _, err := s.model.GetKey(ctx, req.Fingerprint) |
| 41 | if err != nil { |
| 42 | data, err = s.hkpClient.GetKeyRing(ctx, req.Fingerprint) |
| 43 | switch err { |
| 44 | case nil: |
| 45 | break |
| 46 | case hkp.ErrKeyNotFound: |
| 47 | return nil, status.Errorf(codes.NotFound, "key not found: %v", err) |
| 48 | default: |
| 49 | return nil, status.Errorf(codes.Unavailable, "failed to get key from HKP servers: %v", err) |
| 50 | } |
| 51 | } |
| 52 | case pb.KeyInfoRequest_CACHING_FORCE_REMOTE: |
| 53 | data, err = s.hkpClient.GetKeyRing(ctx, req.Fingerprint) |
| 54 | switch err { |
| 55 | case nil: |
| 56 | break |
| 57 | case hkp.ErrKeyNotFound: |
| 58 | return nil, status.Errorf(codes.NotFound, "key not found: %v", err) |
| 59 | default: |
| 60 | return nil, status.Errorf(codes.Unavailable, "failed to get key from HKP servers: %v", err) |
| 61 | } |
| 62 | case pb.KeyInfoRequest_CACHING_FORCE_LOCAL: |
| 63 | _, err := s.model.GetKey(ctx, req.Fingerprint) |
| 64 | switch err { |
| 65 | case nil: |
| 66 | break |
| 67 | case model.ErrKeyNotFound: |
| 68 | return nil, status.Errorf(codes.NotFound, "key not found: %v", err) |
| 69 | default: |
| 70 | return nil, status.Errorf(codes.Unavailable, "failed to read key from local db: %v", err) |
| 71 | } |
| 72 | default: |
| 73 | return nil, status.Errorf(codes.InvalidArgument, "caching field value is invalid") |
| 74 | } |
| 75 | |
| 76 | // successfully read fresh key from hkp, update db |
| 77 | if data != nil { |
| 78 | err := s.model.PutKey(ctx, &model.PgpKey{ |
| 79 | Fingerprint: req.Fingerprint, |
| 80 | KeyData: data, |
| 81 | Okay: true, |
| 82 | }) |
| 83 | |
| 84 | if err != nil { |
| 85 | return nil, status.Errorf(codes.Unavailable, "failed to cache key received from HKP: %v", err) |
| 86 | } |
| 87 | } |
| 88 | |
| 89 | return &pb.KeyInfoResponse{}, nil |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 90 | } |
| 91 | |
| 92 | func (s *service) Encrypt(stream pb.PGPEncryptor_EncryptServer) error { |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 93 | select { |
| 94 | case s.clients <- struct{}{}: |
| 95 | break |
| 96 | case <-stream.Context().Done(): |
| 97 | return status.Errorf(codes.ResourceExhausted, "PGPEncryptor to many sessions running, try again later") |
| 98 | } |
| 99 | |
| 100 | defer func() { |
| 101 | <-s.clients |
| 102 | }() |
| 103 | |
| 104 | ctx, _ := context.WithTimeout(context.Background(), flagHkpMaxWaitTime) |
| 105 | initialMessage, err := stream.Recv() |
| 106 | if err != nil { |
| 107 | return status.Errorf(codes.Canceled, "failed to read data from the client: %v", err) |
| 108 | } |
| 109 | |
| 110 | key, err := s.model.GetKey(ctx, initialMessage.Fingerprint) |
| 111 | if err != nil { |
| 112 | if err != nil { |
| 113 | switch err { |
| 114 | case model.ErrKeyNotFound: |
| 115 | return status.Errorf(codes.NotFound, "recipient key not found: %v", err) |
| 116 | default: |
| 117 | return status.Errorf(codes.Unavailable, "error when getting keyring: %v", err) |
| 118 | } |
| 119 | } |
| 120 | } |
| 121 | |
| 122 | if key.Okay == false { |
| 123 | return status.Errorf(codes.InvalidArgument, "cached key is invalid, call PGPEncryptor.KeyInfo with caching=FORCE_REMOTE to refresh") |
| 124 | } |
| 125 | |
| 126 | senderDone := make(chan struct{}) |
| 127 | errors := make(chan error) |
| 128 | enc, err := s.encryptorFactory.Get(key.Fingerprint, key.KeyData) |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 129 | |
| 130 | if err != nil { |
| 131 | return status.Errorf(codes.Unavailable, "PGPEncryptor error while creating encryptor: %v", err) |
| 132 | } |
Serge Bazanski | 187c4bb | 2019-08-14 18:50:16 +0200 | [diff] [blame] | 133 | defer enc.Close() |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 134 | |
| 135 | err = enc.WritePlainText(initialMessage.Data) |
| 136 | if err != nil { |
| 137 | return err |
| 138 | } |
| 139 | |
| 140 | // keep reading incoming messages and pass them to the encryptor |
| 141 | go func() { |
| 142 | defer enc.Finish() |
| 143 | |
| 144 | for { |
| 145 | in, err := stream.Recv() |
| 146 | |
| 147 | if err != nil { |
| 148 | if err == io.EOF { |
| 149 | return |
| 150 | } |
| 151 | |
| 152 | errors <- status.Errorf(codes.Unavailable, "PGPEncryptor error while receiving message: %v", err) |
| 153 | return |
| 154 | } |
| 155 | |
| 156 | err = enc.WritePlainText(in.Data) |
| 157 | if err != nil { |
| 158 | errors <- status.Errorf(codes.Unavailable, "PGPEncryptor error while writing data to encryptor: %v", err) |
| 159 | return |
| 160 | } |
| 161 | |
| 162 | if in.Info == pb.EncryptRequest_CHUNK_LAST { |
| 163 | return |
| 164 | } |
| 165 | } |
| 166 | }() |
| 167 | |
| 168 | // start sender routine |
| 169 | go func() { |
| 170 | defer close(senderDone) |
| 171 | |
| 172 | for { |
| 173 | data, err := enc.ReadCipherText(flagChunkSize) |
| 174 | if err != nil && err != io.EOF { |
| 175 | errors <- status.Errorf(codes.Unavailable, "PGPEncryptor error while reading cipher stream: %v", err) |
| 176 | return |
| 177 | } |
| 178 | |
| 179 | info := pb.EncryptResponse_CHUNK_INFO_MORE |
| 180 | if err == io.EOF { |
| 181 | info = pb.EncryptResponse_CHUNK_LAST |
| 182 | } |
| 183 | |
| 184 | res := &pb.EncryptResponse{ |
| 185 | Data: data, |
| 186 | Info: info, |
| 187 | } |
| 188 | |
| 189 | err = stream.Send(res) |
| 190 | if err != nil { |
| 191 | errors <- status.Errorf(codes.Unavailable, "PGPEncryptor error while sending data to client: %v", err) |
| 192 | return |
| 193 | } |
| 194 | |
| 195 | if info == pb.EncryptResponse_CHUNK_LAST { |
| 196 | return |
| 197 | } |
| 198 | } |
| 199 | }() |
| 200 | |
| 201 | // sync with sender routine |
| 202 | select { |
| 203 | case <-senderDone: |
| 204 | return nil |
| 205 | case err := <-errors: |
| 206 | return err |
| 207 | } |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 208 | } |
| 209 | |
| 210 | func main() { |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 211 | flag.IntVar(&flagMaxClients, "maxClients", 20, "maximum number of concurrent encryption sessions") |
| 212 | flag.IntVar(&flagChunkSize, "chunkSize", 1024*8, "maximum size of chunk sent back to client") |
| 213 | flag.DurationVar(&flagHkpMaxWaitTime, "hkpMaxWaitTime", 10*time.Second, "maximum time awaiting reply from HKP") |
| 214 | flag.StringVar(&flagDSN, "dsn", "", "PostrgreSQL connection string") |
| 215 | |
| 216 | flag.DurationVar(&gpg.ExecutionTimeLimit, "gpgExecutionTimeLimit", 30*time.Second, "execution time limit for gpg commands") |
| 217 | flag.StringVar(&gpg.BinaryPath, "gpgPath", "gpg", "path to gpg binary") |
| 218 | |
| 219 | flag.DurationVar(&hkp.PerServerTimeLimit, "hkpPerServerTimeLimit", 5*time.Second, "time for HKP server to reply with key") |
| 220 | flag.IntVar(&hkp.PerServerRetryCount, "hkpPerServerRetryCount", 3, "retry count per HKP server") |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 221 | flag.Parse() |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 222 | |
| 223 | // Picking an existing postgres-like driver for sqlx.BindType to work |
| 224 | // See: https://github.com/jmoiron/sqlx/blob/ed7c52c43ee1e12a35efbcfea8dbae2d62a90370/bind.go#L24 |
| 225 | mirko.TraceSQL(&pq.Driver{}, "pgx") |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 226 | mi := mirko.New() |
| 227 | |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 228 | m, err := model.Connect(mi.Context(), "pgx", flagDSN) |
| 229 | if err != nil { |
| 230 | glog.Exitf("Failed to create model: %v", err) |
| 231 | } |
| 232 | |
| 233 | err = m.MigrateUp() |
| 234 | if err != nil { |
| 235 | glog.Exitf("Failed to migrate up: %v", err) |
| 236 | } |
| 237 | |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 238 | if err := mi.Listen(); err != nil { |
| 239 | glog.Exitf("Listen failed: %v", err) |
| 240 | } |
| 241 | |
lb5tr | 716ecf6 | 2019-08-05 17:33:29 -0700 | [diff] [blame] | 242 | s := &service{ |
| 243 | hkpClient: hkp.NewClient(), |
| 244 | encryptorFactory: gpg.CLIEncryptorFactory{}, |
| 245 | clients: make(chan struct{}, flagMaxClients), |
| 246 | model: m, |
| 247 | } |
lb5tr | e5f8e8a | 2019-07-23 11:27:07 -0700 | [diff] [blame] | 248 | pb.RegisterPGPEncryptorServer(mi.GRPC(), s) |
| 249 | |
| 250 | if err := mi.Serve(); err != nil { |
| 251 | glog.Exitf("Serve failed: %v", err) |
| 252 | } |
| 253 | |
| 254 | <-mi.Done() |
| 255 | } |