bgpwtf/cccampix: draw the rest of the owl

Wire up the remaining pieces of the IX automation:

 - birdie, a small client that fetches peering data from the verifier
   over gRPC and renders a BIRD configuration,
 - a PGP processor in the verifier that checks peer keys against
   keyservers,
 - RouterHeartbeat and PeerSecrets RPCs on the Verifier service,
 - a config.gpg frontend endpoint serving PGP-encrypted session secrets,
 - a gRPC ingress (ix-grpc.bgp.wtf) for the verifier.

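Example birdie invocation on a route server (a sketch only: the
endpoint follows the grpcDomain added in ix.libsonnet, while the
identity and addresses are illustrative):

    birdie -verifier ix-grpc.bgp.wtf:443 \
        -identity rs1 \
        -router_id 185.236.243.5 \
        -local_ip 185.236.243.5 > bird.conf

The rendered configuration is printed to stdout; pass -v6 together
with a v6 -local_ip to emit the IPv6 variant.
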
Change-Id: Ia04fb49ebbe3a5afccc57e62f6335e35b45192fe
diff --git a/WORKSPACE b/WORKSPACE
index 3124716..bc9c217 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -112,8 +112,8 @@
     name = "prodimage-bionic",
     registry = "registry.k0.hswaw.net",
     repository = "q3k/prodimage",
-    tag = "20190814-1915",
-    digest = "sha256:708fdaa7beb76dd2c9d2f3773a202d0f59309a79b59ec40753a729e4e44af588",
+    tag = "20190822-1227",
+    digest = "sha256:1cd1f84169b8e1414a5d511b42909f2d540831c67b6799ae9af8cd6a80d75b5f",
 )
 
 container_pull(
diff --git a/bgpwtf/cccampix/birdie/BUILD.bazel b/bgpwtf/cccampix/birdie/BUILD.bazel
new file mode 100644
index 0000000..67fe480
--- /dev/null
+++ b/bgpwtf/cccampix/birdie/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["birdie.go"],
+    importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/birdie",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//bgpwtf/cccampix/proto:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//credentials:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "birdie",
+    embed = [":go_default_library"],
+    static = "on",
+    visibility = ["//visibility:public"],
+)
diff --git a/bgpwtf/cccampix/birdie/birdie.go b/bgpwtf/cccampix/birdie/birdie.go
new file mode 100644
index 0000000..f455844
--- /dev/null
+++ b/bgpwtf/cccampix/birdie/birdie.go
@@ -0,0 +1,197 @@
+package main
+
+import (
+	"context"
+	"crypto/x509"
+	"flag"
+	"fmt"
+	"sort"
+	"strings"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"github.com/golang/glog"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+var (
+	flagVerifier string
+	flagIdentity string
+	flagRouterID string
+	flagV6       bool
+	flagLocalIP  string
+)
+
+func init() {
+	flag.Set("logtostderr", "true")
+}
+
+func main() {
+	flag.StringVar(&flagVerifier, "verifier", "", "Verifier endpoint")
+	flag.StringVar(&flagIdentity, "identity", "", "Router identity")
+	flag.StringVar(&flagRouterID, "router_id", "", "Router ID")
+	flag.StringVar(&flagLocalIP, "local_ip", "", "Local IP")
+	flag.BoolVar(&flagV6, "v6", false, "V6")
+
+	flag.Parse()
+
+	if flagVerifier == "" {
+		glog.Exit("verifier must be set")
+	}
+	if flagIdentity == "" {
+		glog.Exit("identity must be set")
+	}
+	if flagRouterID == "" {
+		glog.Exit("router_id must be set")
+	}
+
+	cpool, _ := x509.SystemCertPool()
+	creds := credentials.NewClientTLSFromCert(cpool, "")
+	conn, err := grpc.Dial(flagVerifier, grpc.WithTransportCredentials(creds))
+	if err != nil {
+		glog.Exitf("Dial: %v", err)
+	}
+
+	ctx := context.Background()
+	verifier := pb.NewVerifierClient(conn)
+
+	req := &pb.RouterHeartbeatRequest{
+		Name: flagIdentity,
+	}
+
+	res, err := verifier.RouterHeartbeat(ctx, req)
+	if err != nil {
+		glog.Exitf("RouterHeartbeat: %v", err)
+	}
+
+	var config string
+	if flagV6 {
+		config = `
+log syslog all;
+
+router id %s;
+debug protocols { states, interfaces, events };
+
+timeformat base iso long;
+timeformat log iso long;
+timeformat protocol iso long;
+timeformat route iso long;
+
+protocol device {}
+
+function net_martians() {
+    return net ~ [ fc00::/7+, fec0::/10+, ::/128-, ::/0{0,15}, ::/0{49,128} ];
+}
+
+function generic_in() {
+	if net_martians() then return false;
+	if bgp_path.len > 64 then return false;
+	if net.len > 64 then return false;
+	if net.len < 12 then return false;
+	return true;
+}
+`
+		config = fmt.Sprintf(config, flagRouterID)
+	} else {
+		config = `
+log syslog all;
+
+router id %s;
+debug protocols { states, interfaces, events };
+
+timeformat base iso long;
+timeformat log iso long;
+timeformat protocol iso long;
+timeformat route iso long;
+
+protocol device {}
+
+function net_martians() {
+    return net ~ [ 169.254.0.0/16+, 172.16.0.0/12+, 192.168.0.0/16+, 10.0.0.0/8+,
+        127.0.0.0/8+, 224.0.0.0/4+, 240.0.0.0/4+, 0.0.0.0/32-, 0.0.0.0/0{25,32}, 0.0.0.0/0{0,7} ];
+}
+
+function generic_in() {
+	if net_martians() then return false;
+	if bgp_path.len > 64 then return false;
+	if net.len > 24 then return false;
+	if net.len < 8 then return false;
+	return true;
+}
+`
+		config = fmt.Sprintf(config, flagRouterID)
+	}
+
+	sort.Slice(res.AsConfigs, func(i, j int) bool {
+		return res.AsConfigs[i].Asn < res.AsConfigs[j].Asn
+	})
+
+	for _, asc := range res.AsConfigs {
+		sort.Slice(asc.Routers, func(i, j int) bool {
+			return asc.Routers[i].Password < asc.Routers[j].Password
+		})
+		for i, router := range asc.Routers {
+			addr := ""
+			if flagV6 {
+				addr = router.Ipv6
+			} else {
+				addr = router.Ipv4
+			}
+			if addr == "" {
+				continue
+			}
+			peerid := fmt.Sprintf("PEER_%d_%d", asc.Asn, i)
+			prefixes := []string{}
+			for _, prefix := range asc.Prefixes {
+				if flagV6 && !strings.Contains(prefix.Prefix, ":") {
+					continue
+				}
+				if !flagV6 && !strings.Contains(prefix.Prefix, ".") {
+					continue
+				}
+				parts := strings.Split(prefix.Prefix, "/")
+				addr := parts[0]
+				bits := parts[1]
+				filter := fmt.Sprintf("%s/%s{%s,%d}", addr, bits, bits, prefix.MaxLength)
+				if fmt.Sprintf("%d", prefix.MaxLength) == bits {
+					filter = fmt.Sprintf("%s/%s", addr, bits)
+				}
+				prefixes = append(prefixes, filter)
+			}
+			if len(prefixes) == 0 {
+				continue
+			}
+			allowed := strings.Join(prefixes, ",")
+
+			part := `
+filter %s_in {
+	if !generic_in() then reject;
+	if net ~ [ %s ] then accept;
+	reject;
+}
+`
+			part = fmt.Sprintf(part, peerid, allowed)
+			config += part
+
+			part = `
+filter %s_out {
+	accept;
+}
+`
+			part = fmt.Sprintf(part, peerid)
+			config += part
+
+			part = `
+protocol bgp %s {
+	local %s as 208521;
+	neighbor %s as %d;
+	import filter %s_in;
+}
+`
+			part = fmt.Sprintf(part, peerid, flagLocalIP, addr, asc.Asn, peerid)
+			config += part
+		}
+	}
+
+	fmt.Println(config)
+}
diff --git a/bgpwtf/cccampix/frontend/frontend.py b/bgpwtf/cccampix/frontend/frontend.py
index 2a6f240..c7558a7 100644
--- a/bgpwtf/cccampix/frontend/frontend.py
+++ b/bgpwtf/cccampix/frontend/frontend.py
@@ -17,6 +17,7 @@
 
 check_info = {
     'irr': ('IRR', 'Required IRR entries are present for this AS'),
+    'pgp': ('PGP', 'The PGP key defined in the IRR entry exists on PGP keyservers'),
 }
 
 
@@ -55,7 +56,22 @@
                 return 'Internal server error.'
         
         return render_template('asn.html', details=details, asn=asn, check_info=check_info)
-    
+
+    @app.route('/asn/<int:asn>/config.gpg')
+    def view_asn_config(asn):
+        req = ipb.PeerSecretsRequest()
+        req.asn = asn
+
+        details = None
+        try:
+            details = verifier.stub(ipb_grpc.VerifierStub).PeerSecrets(req)
+        except grpc.RpcError as e:
+            if e.code() == grpc.StatusCode.NOT_FOUND:
+                return 'No such ASN.'
+            else:
+                return 'Internal server error.'
+
+        return details.pgp_data, 200, {'Content-Type': 'application/octet-stream'}
     
     @app.template_filter()
     def from_nano(v):
diff --git a/bgpwtf/cccampix/frontend/templates/asn.html b/bgpwtf/cccampix/frontend/templates/asn.html
index 0eb3a61..a9fdc78 100644
--- a/bgpwtf/cccampix/frontend/templates/asn.html
+++ b/bgpwtf/cccampix/frontend/templates/asn.html
@@ -102,5 +102,11 @@
                 {% endfor %}
             </table>
         </p>
+        <h2>AS{{ asn }} configuration</h2>
+        <p>
+            To get configuration data for your routers, please decrypt the following GPG message:
+
+            <pre>curl https://ix-status.bgp.wtf/asn/{{ asn }}/config.gpg | gpg --decrypt</pre>
+        </p>
     </body>
 </html>
diff --git a/bgpwtf/cccampix/kube/ix.libsonnet b/bgpwtf/cccampix/kube/ix.libsonnet
index 297fd34..ff74f2b 100644
--- a/bgpwtf/cccampix/kube/ix.libsonnet
+++ b/bgpwtf/cccampix/kube/ix.libsonnet
@@ -5,11 +5,12 @@
         local ix = self,
         local cfg = ix.cfg,
         cfg:: {
-            image: "registry.k0.hswaw.net/bgpwtf/cccampix:1565803250-3a1811e363502c697ea337c15d653698bd662dae",
+            image: "registry.k0.hswaw.net/bgpwtf/cccampix:1566475793-53f188c8fe83781ac057a3442830c6aa3dce5269",
 
             domain: "ix-status.bgp.wtf",
+            grpcDomain: "ix-grpc.bgp.wtf",
             octorpki: {
-                image: "registry.k0.hswaw.net/bgpwtf/cccampix:1565469898-95928eecd7e35e8582fa011d1457643ca398c310",
+                image: cfg.image,
                 storageClassName: "waw-hdd-redundant-2",
                 resources: {
                     requests: { cpu: "200m", memory: "1Gi" },
@@ -218,6 +219,7 @@
                 "-peeringdb=" + ix.peeringdb.address,
                 "-irr=" + ix.irr.address,
                 "-octorpki=" + ix.octorpki.address,
+                "-pgpencryptor=" + ix.pgpencryptor.address,
             ] + ix.crdb.args(cfg.verifier.db),
         },
 
@@ -299,5 +301,33 @@
                 ],
             },
         },
+
+        grpcIngress: kube.Ingress("grpc") {
+            metadata+: ix.metadata("grpc") {
+                annotations+: {
+                    "kubernetes.io/tls-acme": "true",
+                    "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+                    "kubernetes.io/ingress.class": "nginx",
+                    "nginx.ingress.kubernetes.io/ssl-redirect": "true",
+                    "nginx.ingress.kubernetes.io/backend-protocol": "GRPC",
+                    "nginx.ingress.kubernetes.io/whitelist-source-range": "185.236.240.34/32",
+                },
+            },
+            spec+: {
+                tls: [
+                    { hosts: [cfg.grpcDomain], secretName: "grpc-tls"}
+                ],
+                rules: [
+                    {
+                        host: cfg.grpcDomain,
+                        http: {
+                            paths: [
+                                { path: "/", backend: ix.verifier.svc.name_port },
+                            ],
+                        },
+                    },
+                ],
+            },
+        },
     },
 }
diff --git a/bgpwtf/cccampix/octorpki/entrypoint.sh b/bgpwtf/cccampix/octorpki/entrypoint.sh
index 62c59b1..a9ef3e2 100644
--- a/bgpwtf/cccampix/octorpki/entrypoint.sh
+++ b/bgpwtf/cccampix/octorpki/entrypoint.sh
@@ -4,4 +4,4 @@
 
 cd /octorpki
 
-./octorpki -cache /cache/ -output.sign=false "$@"
+./octorpki -cache /cache/ -output.sign=false -output.wait=false "$@"
diff --git a/bgpwtf/cccampix/proto/ix.proto b/bgpwtf/cccampix/proto/ix.proto
index 65bf354..229db56 100644
--- a/bgpwtf/cccampix/proto/ix.proto
+++ b/bgpwtf/cccampix/proto/ix.proto
@@ -130,10 +130,45 @@
     PeeringDBMember peeringdb_info = 3;
 }
 
+message RouterHeartbeatRequest {
+    string name = 1;
+    string current_version = 2; 
+}
+
+message RouterHeartbeatResponse {
+    message ASConfig {
+        int64 asn = 1;
+        message Router {
+            string ipv6 = 1;
+            string ipv4 = 2;
+            string password = 3;
+        };
+        repeated Router routers = 2;
+        message AllowedPrefix {
+            string prefix = 1;
+            int64 max_length = 2;
+        };
+        repeated AllowedPrefix prefixes = 3;
+    };
+    repeated ASConfig as_configs = 1;
+    string version = 2;
+    uint64 call_again = 3;
+}
+
+message PeerSecretsRequest {
+    int64 asn = 1;
+}
+
+message PeerSecretsResponse {
+    bytes pgp_data = 1;
+}
+
 service Verifier {
     rpc ProcessorStatus(ProcessorStatusRequest) returns (ProcessorStatusResponse);
     rpc PeerSummary(PeerSummaryRequest) returns (stream PeerSummaryResponse);
     rpc PeerDetails(PeerDetailsRequest) returns (PeerDetailsResponse);
+    rpc RouterHeartbeat(RouterHeartbeatRequest) returns (RouterHeartbeatResponse);
+    rpc PeerSecrets(PeerSecretsRequest) returns (PeerSecretsResponse);
 }
 
 message KeyInfoRequest {
diff --git a/bgpwtf/cccampix/verifier/BUILD.bazel b/bgpwtf/cccampix/verifier/BUILD.bazel
index f7d15e3..dc9bf22 100644
--- a/bgpwtf/cccampix/verifier/BUILD.bazel
+++ b/bgpwtf/cccampix/verifier/BUILD.bazel
@@ -6,9 +6,11 @@
         "main.go",
         "processor_irr.go",
         "processor_peeringdb.go",
+        "processor_pgp.go",
         "processor_rpki.go",
         "processor_secretgen.go",
         "processors.go",
+        "service.go",
         "state.go",
         "statusz.go",
     ],
diff --git a/bgpwtf/cccampix/verifier/main.go b/bgpwtf/cccampix/verifier/main.go
index 591ee33..371545e 100644
--- a/bgpwtf/cccampix/verifier/main.go
+++ b/bgpwtf/cccampix/verifier/main.go
@@ -10,12 +10,12 @@
 	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
 	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
 	"code.hackerspace.pl/hscloud/go/mirko"
+	"code.hackerspace.pl/hscloud/go/pki"
 	"code.hackerspace.pl/hscloud/go/statusz"
 	"github.com/golang/glog"
 	"github.com/lib/pq"
 	"golang.org/x/net/trace"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
+	"google.golang.org/grpc"
 )
 
 type processorState struct {
@@ -25,11 +25,17 @@
 	lastErr error
 }
 
+type routerState struct {
+	name    string
+	last    *time.Time
+	version string
+}
+
 func (p *processorState) nextRun() *time.Time {
 	if p.lastRun == nil {
 		return nil
 	}
-	nr := p.p.NextRun(*p.lastRun)
+	nr := p.p.NextRun(*p.lastRun, p.lastErr != nil)
 	return &nr
 }
 
@@ -39,7 +45,13 @@
 	processors   map[string]*processorState
 	processorsMu sync.RWMutex
 
+	routers            map[string]*routerState
+	routersLastVersion string
+	routersMu          sync.RWMutex
+
 	requiredChecks []string
+
+	pgp pb.PGPEncryptorClient
 }
 
 func (s *service) run(ctx context.Context) {
@@ -66,13 +78,20 @@
 		nr := p.nextRun()
 		if nr == nil || nr.Before(now) {
 			glog.Infof("Running processor %q...", p.name)
+			start := time.Now()
+
 			tr := trace.New(fmt.Sprintf("processor.%s", p.name), "Run")
 			pctx := trace.NewContext(ctx, tr)
 			err := p.p.RunAll(pctx, s.model)
 			tr.LazyPrintf("Processor done: %v", err)
 			tr.Finish()
+
 			if err != nil {
 				glog.Errorf("Running processor %q failed: %v", p.name, err)
+			} else {
+				diff := time.Since(start)
+				tr.LazyPrintf("Took %s", diff.String())
+				glog.Infof("Processor %q took %s", p.name, diff.String())
 			}
 			p.lastErr = err
 			p.lastRun = &now
@@ -81,17 +100,19 @@
 }
 
 var (
-	flagDSN       string
-	flagPeeringDB string
-	flagIRR       string
-	flagOctoRPKI  string
+	flagDSN          string
+	flagIRR          string
+	flagOctoRPKI     string
+	flagPGPEncryptor string
+	flagPeeringDB    string
 )
 
 func main() {
 	flag.StringVar(&flagDSN, "dsn", "", "PostgreSQL connection string")
-	flag.StringVar(&flagPeeringDB, "peeringdb", "", "Address of peeringdb service")
 	flag.StringVar(&flagIRR, "irr", "", "Address of irr service")
 	flag.StringVar(&flagOctoRPKI, "octorpki", "", "Address of octorpki service")
+	flag.StringVar(&flagPGPEncryptor, "pgpencryptor", "", "Address of pgpencryptor service")
+	flag.StringVar(&flagPeeringDB, "peeringdb", "", "Address of peeringdb service")
 	flag.Parse()
 
 	// Picking an existing postgres-like driver for sqlx.BindType to work
@@ -113,10 +134,17 @@
 		glog.Exitf("Listen failed: %v", err)
 	}
 
+	conn, err := grpc.Dial(flagPGPEncryptor, pki.WithClientHSPKI())
+	if err != nil {
+		glog.Exitf("could not connect to pgpencryptor service: %v", err)
+	}
+
 	s := &service{
 		model:          m,
 		processors:     make(map[string]*processorState),
-		requiredChecks: []string{"irr"},
+		requiredChecks: []string{"irr", "pgp"},
+		routers:        make(map[string]*routerState),
+		pgp:            pb.NewPGPEncryptorClient(conn),
 	}
 
 	must := func(p processor, err error) processor {
@@ -129,6 +157,7 @@
 	s.addProcessor(must(newIRR(flagIRR)))
 	s.addProcessor(must(newSecretGen()))
 	s.addProcessor(must(newRPKI(flagOctoRPKI)))
+	s.addProcessor(must(newPGP(s.pgp)))
 	statusz.AddStatusPart("Processors", processorsFragment, s.statuszProcessors)
 
 	go s.run(mi.Context())
@@ -156,164 +185,3 @@
 		lastRun: nil,
 	}
 }
-
-func (s *service) ProcessorStatus(ctx context.Context, req *pb.ProcessorStatusRequest) (*pb.ProcessorStatusResponse, error) {
-	s.processorsMu.RLock()
-	defer s.processorsMu.RUnlock()
-
-	res := &pb.ProcessorStatusResponse{
-		Processors: make([]*pb.ProcessorStatusResponse_Processor, len(s.processors)),
-	}
-
-	i := 0
-	for _, p := range s.processors {
-		res.Processors[i] = &pb.ProcessorStatusResponse_Processor{
-			Name:    p.name,
-			Status:  pb.ProcessorStatusResponse_Processor_STATUS_OK,
-			LastRun: 0,
-			NextRun: 0,
-		}
-
-		if p.lastRun != nil {
-			res.Processors[i].LastRun = p.lastRun.UnixNano()
-			res.Processors[i].NextRun = p.p.NextRun(*p.lastRun).UnixNano()
-		}
-
-		if p.lastErr != nil {
-			res.Processors[i].Status = pb.ProcessorStatusResponse_Processor_STATUS_ERROR
-		}
-
-		i += 1
-	}
-	return res, nil
-}
-
-func (s *service) PeerSummary(req *pb.PeerSummaryRequest, stream pb.Verifier_PeerSummaryServer) error {
-	peers, err := s.model.GetCheckablePeers(stream.Context())
-	if err != nil {
-		glog.Errorf("model.GetCheckablePeers: %v", err)
-		return status.Error(codes.Unavailable, "model error")
-	}
-
-	asns := make([]int64, len(peers))
-	asnToRes := make(map[int64]*pb.PeerSummaryResponse)
-
-	for i, peer := range peers {
-		routers := make([]*pb.PeeringDBMember_Router, len(peer.Routers))
-		for i, router := range peer.Routers {
-			routers[i] = &pb.PeeringDBMember_Router{}
-			if router.V4 != nil {
-				routers[i].Ipv4 = router.V4.String()
-			}
-			if router.V6 != nil {
-				routers[i].Ipv6 = router.V6.String()
-			}
-		}
-		p := &pb.PeeringDBMember{
-			Asn:     peer.ASN,
-			Name:    peer.Name,
-			Routers: routers,
-		}
-		res := &pb.PeerSummaryResponse{
-			PeeringdbInfo: p,
-			CheckStatus:   pb.PeerSummaryResponse_STATUS_OK,
-		}
-		asnToRes[peer.ASN] = res
-		asns[i] = peer.ASN
-	}
-
-	checkres, err := s.model.GetPeerCheckResults(stream.Context(), asns)
-	if err != nil {
-		glog.Errorf("GetPeerCheckResults(%v): %v", asns, err)
-		for _, res := range asnToRes {
-			res.CheckStatus = pb.PeerSummaryResponse_STATUS_UNKNOWN
-		}
-	} else {
-		passedChecks := make(map[int64]map[string]bool)
-		for _, c := range checkres {
-			if _, ok := passedChecks[c.PeerASN]; !ok {
-				passedChecks[c.PeerASN] = make(map[string]bool)
-			}
-			passedChecks[c.PeerASN][c.CheckName] = c.Status == model.PeerCheckStatus_Okay
-		}
-
-		for asn, checks := range passedChecks {
-			for _, required := range s.requiredChecks {
-				if !checks[required] {
-					asnToRes[asn].CheckStatus = pb.PeerSummaryResponse_STATUS_FAILED
-					break
-				}
-			}
-		}
-	}
-
-	for _, res := range asnToRes {
-		if err := stream.Send(res); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *service) PeerDetails(ctx context.Context, req *pb.PeerDetailsRequest) (*pb.PeerDetailsResponse, error) {
-	if req.Asn <= 0 {
-		return nil, status.Error(codes.InvalidArgument, "asn must be set")
-	}
-
-	res := &pb.PeerDetailsResponse{}
-
-	peeringdb, err := s.model.GetPeeringDBPeer(ctx, req.Asn)
-	if err != nil {
-		glog.Errorf("GetPeeringDBPeer(%v): %v", req.Asn, err)
-		return nil, status.Error(codes.Unavailable, "could not get allowed prefixes")
-	}
-
-	if peeringdb.Asn != req.Asn {
-		return nil, status.Error(codes.NotFound, "no such ASN")
-	}
-
-	res.PeeringdbInfo = peeringdb
-
-	checkres, err := s.model.GetPeerCheckResults(ctx, []int64{req.Asn})
-	if err != nil {
-		glog.Errorf("GetPeerCheckResults(%v): %v", req.Asn, err)
-		return nil, status.Error(codes.Unavailable, "could not get check results")
-	}
-
-	res.Checks = make([]*pb.PeerDetailsResponse_Check, len(checkres))
-	for i, check := range checkres {
-		status := pb.PeerDetailsResponse_Check_STATUS_INVALID
-		switch check.Status {
-		case model.PeerCheckStatus_Okay:
-			status = pb.PeerDetailsResponse_Check_STATUS_OK
-		case model.PeerCheckStatus_SoftFailed:
-			status = pb.PeerDetailsResponse_Check_STATUS_OK
-		case model.PeerCheckStatus_Failed:
-			status = pb.PeerDetailsResponse_Check_STATUS_FAILED
-		}
-		res.Checks[i] = &pb.PeerDetailsResponse_Check{
-			Name:   check.CheckName,
-			Status: status,
-			Time:   check.Time.UnixNano(),
-			Msg:    check.Message,
-		}
-	}
-
-	prefixes, err := s.model.GetAllowedPrefixes(ctx, req.Asn)
-	if err != nil {
-		glog.Errorf("GetAllowedPrefixes(%v): %v", req.Asn, err)
-		return nil, status.Error(codes.Unavailable, "could not get allowed prefixes")
-	}
-
-	res.AllowedPrefixes = make([]*pb.PeerDetailsResponse_AllowedPrefix, len(prefixes))
-	for i, prefix := range prefixes {
-		res.AllowedPrefixes[i] = &pb.PeerDetailsResponse_AllowedPrefix{
-			Prefix:    prefix.Prefix.String(),
-			MaxLength: prefix.MaxLength,
-			Ta:        prefix.TA,
-		}
-	}
-
-	return res, nil
-}
diff --git a/bgpwtf/cccampix/verifier/model/config.go b/bgpwtf/cccampix/verifier/model/config.go
index bafd46f..1cbd2d7 100644
--- a/bgpwtf/cccampix/verifier/model/config.go
+++ b/bgpwtf/cccampix/verifier/model/config.go
@@ -4,6 +4,8 @@
 	"context"
 	"database/sql"
 	"fmt"
+	"net"
+	"strconv"
 )
 
 func (m *sqlModel) ConfigureMissingSessions(ctx context.Context, gen func() SessionConfig) error {
@@ -49,3 +51,75 @@
 
 	return tx.Commit()
 }
+
+func (m *sqlModel) GetPeerConfiguration(ctx context.Context) ([]*PeerConfiguration, error) {
+	q := `
+		SELECT
+		    peers.asn "asn",
+			peer_pgp_keys.fingerprint "peer_pgp_keys.fingerprint",
+			peer_routers.v6 "peer_routers.v6", peer_routers.v4 "peer_routers.v4",
+			session_configs.bgp_secret "session_configs.bgp_secret"
+		FROM session_configs
+		LEFT JOIN peer_routers
+		ON peer_routers.id = session_configs.peer_router_id
+		INNER JOIN peer_pgp_keys
+		ON peer_pgp_keys.peer_id = session_configs.peer_id
+		LEFT JOIN peers
+		on peers.id = session_configs.peer_id
+	`
+
+	data := []struct {
+		PGP    sqlPeerPGPKey    `db:"peer_pgp_keys"`
+		Config sqlSessionConfig `db:"session_configs"`
+		Router sqlPeerRouter    `db:"peer_routers"`
+		ASN    string           `db:"asn"`
+	}{}
+
+	if err := m.db.SelectContext(ctx, &data, q); err != nil {
+		return nil, fmt.Errorf("SELECT peers/peer_pgp_keys/session_configs: %v", err)
+	}
+
+	resM := make(map[string]*PeerConfiguration)
+
+	for _, d := range data {
+		k := d.ASN
+		r, ok := resM[k]
+		if !ok {
+			asn, err := strconv.ParseInt(d.ASN, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("data corruption: invalid ASN %q", d.ASN)
+			}
+			r = &PeerConfiguration{
+				Peer: Peer{
+					ASN:     asn,
+					Routers: []*Router{},
+				},
+				Key: PeerPGPKey{
+					PeerASN:     asn,
+					Fingerprint: d.PGP.Fingerprint,
+				},
+			}
+			resM[k] = r
+		}
+
+		v6 := net.ParseIP(d.Router.V6.String)
+		v4 := net.ParseIP(d.Router.V4.String)
+		secret := d.Config.BGPSecret
+		r.Peer.Routers = append(r.Peer.Routers, &Router{
+			V6: v6,
+			V4: v4,
+			Config: &SessionConfig{
+				BGPSecret: secret,
+			},
+		})
+	}
+
+	res := make([]*PeerConfiguration, len(resM))
+	i := 0
+	for _, pc := range resM {
+		res[i] = pc
+		i += 1
+	}
+
+	return res, nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1565806867_pgp_key_state.down.sql b/bgpwtf/cccampix/verifier/model/migrations/1565806867_pgp_key_state.down.sql
new file mode 100644
index 0000000..a768e04
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1565806867_pgp_key_state.down.sql
@@ -0,0 +1,3 @@
+set sql_safe_updates=false;
+ALTER TABLE peer_pgp_keys DROP COLUMN state;
+set sql_safe_updates=true;
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1565806867_pgp_key_state.up.sql b/bgpwtf/cccampix/verifier/model/migrations/1565806867_pgp_key_state.up.sql
new file mode 100644
index 0000000..e7d7fd4
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1565806867_pgp_key_state.up.sql
@@ -0,0 +1,2 @@
+ALTER TABLE peer_pgp_keys ADD COLUMN state STRING check ( state = 'unchecked' or state = 'unknown' or state = 'known' ) NOT NULL DEFAULT 'unchecked';
+ALTER TABLE peer_pgp_keys ALTER COLUMN state DROP DEFAULT;
diff --git a/bgpwtf/cccampix/verifier/model/model.go b/bgpwtf/cccampix/verifier/model/model.go
index b9b81c9..53294dd 100644
--- a/bgpwtf/cccampix/verifier/model/model.go
+++ b/bgpwtf/cccampix/verifier/model/model.go
@@ -23,40 +23,48 @@
 	GetPeeringDBPeer(ctx context.Context, asn int64) (*pb.PeeringDBMember, error)
 
 	GetCheckablePeers(ctx context.Context) ([]*Peer, error)
-	SubmitPeerCheckResults(ctx context.Context, res []*PeerCheckResult) error
+	SubmitPeerCheckResults(ctx context.Context, checkName string, res []*PeerCheckResult) error
 	GetPeerCheckResults(ctx context.Context, asn []int64) ([]*PeerCheckResult, error)
 
 	UpdatePGPKey(ctx context.Context, key *PeerPGPKey) error
+	GetPGPKeysRequiringAttention(ctx context.Context) ([]*PeerPGPKey, error)
+	ValidatePGPKeys(ctx context.Context, positive, negative []string) error
+	GetPeerPGPKey(ctx context.Context, asn int64) (*PeerPGPKey, error)
 
 	ConfigureMissingSessions(ctx context.Context, gen func() SessionConfig) error
 
+	GetPeerConfiguration(ctx context.Context) ([]*PeerConfiguration, error)
+
 	UpdateAllowedPrefixes(ctx context.Context, asn int64, prefixes []*AllowedPrefix) error
 	GetAllowedPrefixes(ctx context.Context, asn int64) ([]*AllowedPrefix, error)
 }
 
-type stringer struct {
+type Router struct {
+	V6     net.IP
+	V4     net.IP
+	Config *SessionConfig
 }
 
-func (s *stringer) String() string {
-	if s == nil {
+func (p *Router) String() string {
+	if p == nil {
 		return "<nil>"
 	}
-	return fmt.Sprintf("%+v", *s)
-}
-
-type Router struct {
-	stringer
-	V6 net.IP
-	V4 net.IP
+	return fmt.Sprintf("%+v", *p)
 }
 
 type Peer struct {
-	stringer
 	ASN     int64
 	Name    string
 	Routers []*Router
 }
 
+func (p *Peer) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *p)
+}
+
 type PeerCheckStatus int
 
 const (
@@ -82,15 +90,41 @@
 }
 
 type PeerPGPKey struct {
-	stringer
 	PeerASN     int64
 	Fingerprint string
+	State       string
+}
+
+func (p *PeerPGPKey) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *p)
 }
 
 type SessionConfig struct {
 	BGPSecret string
 }
 
+func (p *SessionConfig) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *p)
+}
+
+type PeerConfiguration struct {
+	Peer Peer
+	Key  PeerPGPKey
+}
+
+func (p *PeerConfiguration) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *p)
+}
+
 type AllowedPrefix struct {
 	Prefix    net.IPNet
 	MaxLength int64
diff --git a/bgpwtf/cccampix/verifier/model/pgp.go b/bgpwtf/cccampix/verifier/model/pgp.go
index a76186e..9292c45 100644
--- a/bgpwtf/cccampix/verifier/model/pgp.go
+++ b/bgpwtf/cccampix/verifier/model/pgp.go
@@ -2,22 +2,26 @@
 
 import (
 	"context"
+	"database/sql"
 	"fmt"
+	"strconv"
 	"time"
 )
 
 func (s *sqlModel) UpdatePGPKey(ctx context.Context, key *PeerPGPKey) error {
 	q := `
 		INSERT INTO peer_pgp_keys
-			(peer_id, fingerprint, time_created)
+			(peer_id, fingerprint, time_created, state)
 		SELECT
-			peers.id, :fingerprint, :time_created
+			peers.id, :fingerprint, :time_created, 'unchecked'
 		FROM peers
 		WHERE peers.asn = :asn
 		ON CONFLICT (peer_id)
 		DO UPDATE SET
 			fingerprint = :fingerprint,
-			time_created = :time_created
+			time_created = :time_created,
+			state = 'unchecked'
+		WHERE peer_pgp_keys.fingerprint != excluded.fingerprint
 	`
 	data := &sqlPeerPGPKey{
 		Fingerprint: key.Fingerprint,
@@ -29,3 +33,105 @@
 	}
 	return nil
 }
+
+func (s *sqlModel) GetPGPKeysRequiringAttention(ctx context.Context) ([]*PeerPGPKey, error) {
+	q := `
+		SELECT
+			peer_pgp_keys.fingerprint "fingerprint",
+			peer_pgp_keys.state "state",
+			peers.asn "asn"
+		FROM peer_pgp_keys
+		LEFT JOIN peers
+		ON peers.id = peer_pgp_keys.peer_id
+		WHERE
+			peer_pgp_keys.state = 'unchecked'
+		OR
+			peer_pgp_keys.state = 'known'
+		OR (
+			peer_pgp_keys.state = 'unknown' AND
+			peer_pgp_keys.time_created > $1
+		)
+	`
+
+	data := []sqlPeerPGPKey{}
+	timestamp := time.Now().Add(-time.Hour).UnixNano()
+	if err := s.db.SelectContext(ctx, &data, q, timestamp); err != nil {
+		return nil, fmt.Errorf("SELECT peer_pgp_keys: %v", err)
+	}
+
+	res := make([]*PeerPGPKey, len(data))
+	for i, datum := range data {
+		asn, err := strconv.ParseInt(datum.ASN, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("data corruption: peer_pgp_keys has invalid ASN %q", datum.ASN)
+		}
+		res[i] = &PeerPGPKey{
+			Fingerprint: datum.Fingerprint,
+			State:       datum.State,
+			PeerASN:     asn,
+		}
+	}
+
+	return res, nil
+}
+
+func (s *sqlModel) ValidatePGPKeys(ctx context.Context, positive, negative []string) error {
+	tx := s.db.MustBeginTx(ctx, &sql.TxOptions{})
+	defer tx.Rollback()
+
+	timestamp := time.Now().UnixNano()
+
+	for _, p := range positive {
+		q := `
+			UPDATE
+				peer_pgp_keys
+			SET
+				state = 'known',
+				time_created = $2
+			WHERE
+				fingerprint = $1	
+		`
+
+		if _, err := tx.ExecContext(ctx, q, p, timestamp); err != nil {
+			return fmt.Errorf("UPDATE peer_pgp_keys: %v", err)
+		}
+	}
+
+	for _, n := range negative {
+		q := `
+			UPDATE
+				peer_pgp_keys
+			SET
+				state = 'unknown',
+				time_created = $2
+			WHERE
+				fingerprint = $1	
+		`
+
+		if _, err := tx.ExecContext(ctx, q, n, timestamp); err != nil {
+			return fmt.Errorf("UPDATE peer_pgp_keys: %v", err)
+		}
+	}
+
+	return tx.Commit()
+}
+
+func (s *sqlModel) GetPeerPGPKey(ctx context.Context, asn int64) (*PeerPGPKey, error) {
+	q := `
+		SELECT peer_pgp_keys.fingerprint
+		FROM peer_pgp_keys
+		LEFT JOIN peers
+		ON peers.id = peer_pgp_keys.peer_id
+		WHERE peers.asn = $1
+	`
+	data := []*PeerPGPKey{}
+	if err := s.db.SelectContext(ctx, &data, q, asn); err != nil {
+		return nil, fmt.Errorf("SELECT peer_pgp_keys: %v", err)
+	}
+
+	if len(data) != 1 {
+		return nil, fmt.Errorf("wrong number of peer_pgp_keys (%d)", len(data))
+	}
+
+	return data[0], nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/schema.go b/bgpwtf/cccampix/verifier/model/schema.go
index 093ecc8..1a483c2 100644
--- a/bgpwtf/cccampix/verifier/model/schema.go
+++ b/bgpwtf/cccampix/verifier/model/schema.go
@@ -38,6 +38,7 @@
 	PeerID      string `db:"peer_id"`
 	Fingerprint string `db:"fingerprint"`
 	TimeCreated int64  `db:"time_created"`
+	State       string `db:"state"`
 
 	// Fake, used by app logic.
 	ASN string `db:"asn"`
diff --git a/bgpwtf/cccampix/verifier/model/submit_checks.go b/bgpwtf/cccampix/verifier/model/submit_checks.go
index 79e2a84..0586766 100644
--- a/bgpwtf/cccampix/verifier/model/submit_checks.go
+++ b/bgpwtf/cccampix/verifier/model/submit_checks.go
@@ -8,15 +8,21 @@
 	"github.com/golang/glog"
 )
 
-func (s *sqlModel) SubmitPeerCheckResults(ctx context.Context, res []*PeerCheckResult) error {
+func (s *sqlModel) SubmitPeerCheckResults(ctx context.Context, checkName string, res []*PeerCheckResult) error {
 	tx := s.db.MustBeginTx(ctx, &sql.TxOptions{})
 	defer tx.Rollback()
 
+	glog.Infof("SubmitPeerCheckResults:")
+	for _, r := range res {
+		glog.Infof(" - %+v", *r)
+	}
+
 	q := `
 		UPDATE peer_checks
 		SET delete = true
+		WHERE check_name = $1
 	`
-	if _, err := tx.ExecContext(ctx, q); err != nil {
+	if _, err := tx.ExecContext(ctx, q, checkName); err != nil {
 		return fmt.Errorf("UPDATE for deletion peer_checks: %v", err)
 	}
 
@@ -64,8 +70,9 @@
 	q = `
 		DELETE FROM peer_checks
 		WHERE delete = true
+		AND check_name = $1
 	`
-	if _, err := tx.ExecContext(ctx, q); err != nil {
+	if _, err := tx.ExecContext(ctx, q, checkName); err != nil {
 		return fmt.Errorf("DELETE FROM peer_checks: %v", err)
 	}
 
diff --git a/bgpwtf/cccampix/verifier/processor_irr.go b/bgpwtf/cccampix/verifier/processor_irr.go
index 49c4eb1..c6b355c 100644
--- a/bgpwtf/cccampix/verifier/processor_irr.go
+++ b/bgpwtf/cccampix/verifier/processor_irr.go
@@ -8,14 +8,13 @@
 	"sync"
 	"time"
 
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
 	"code.hackerspace.pl/hscloud/go/pki"
 	"github.com/golang/glog"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-
-	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
-	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
 )
 
 const (
@@ -42,7 +41,10 @@
 	return "IRR"
 }
 
-func (i *irr) NextRun(now time.Time) time.Time {
+func (i *irr) NextRun(now time.Time, lastFailed bool) time.Time {
+	if lastFailed {
+		return now.Add(1 * time.Minute)
+	}
 	return now.Add(5 * time.Minute)
 }
 
@@ -175,7 +177,7 @@
 	<-pcrDone
 	<-pkDone
 
-	err = m.SubmitPeerCheckResults(ctx, pcr)
+	err = m.SubmitPeerCheckResults(ctx, "irr", pcr)
 	if err != nil {
 		return err
 	}
diff --git a/bgpwtf/cccampix/verifier/processor_peeringdb.go b/bgpwtf/cccampix/verifier/processor_peeringdb.go
index 8f29110..2d3c92d 100644
--- a/bgpwtf/cccampix/verifier/processor_peeringdb.go
+++ b/bgpwtf/cccampix/verifier/processor_peeringdb.go
@@ -5,11 +5,10 @@
 	"fmt"
 	"time"
 
-	"code.hackerspace.pl/hscloud/go/pki"
-	"google.golang.org/grpc"
-
 	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
 	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
+	"code.hackerspace.pl/hscloud/go/pki"
+	"google.golang.org/grpc"
 )
 
 type peeringDB struct {
@@ -31,7 +30,10 @@
 	return "PeeringDB"
 }
 
-func (p *peeringDB) NextRun(now time.Time) time.Time {
+func (p *peeringDB) NextRun(now time.Time, lastFailed bool) time.Time {
+	if lastFailed {
+		return now.Add(1 * time.Minute)
+	}
 	return now.Add(5 * time.Minute)
 }
 
diff --git a/bgpwtf/cccampix/verifier/processor_pgp.go b/bgpwtf/cccampix/verifier/processor_pgp.go
new file mode 100644
index 0000000..423cb7e
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/processor_pgp.go
@@ -0,0 +1,153 @@
+package main
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"sync"
+	"time"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
+	"github.com/golang/glog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type pgp struct {
+	pgpc pb.PGPEncryptorClient
+}
+
+func newPGP(pgpc pb.PGPEncryptorClient) (*pgp, error) {
+	return &pgp{
+		pgpc: pgpc,
+	}, nil
+}
+
+func (p *pgp) Name() string {
+	return "PGP"
+}
+
+func (p *pgp) NextRun(now time.Time, lastFailed bool) time.Time {
+	if lastFailed {
+		return now.Add(1 * time.Minute)
+	}
+	return now.Add(15 * time.Minute)
+}
+
+func (p *pgp) RunAll(ctx context.Context, m model.Model) error {
+	keys, err := m.GetPGPKeysRequiringAttention(ctx)
+	if err != nil {
+		return fmt.Errorf("GetPGPKeysRequiringAttention: %v", err)
+	}
+
+	if len(keys) == 0 {
+		return nil
+	}
+
+	s := make(chan struct{}, 20)
+	errC := make(chan error, len(keys))
+	knownC := make(chan *model.PeerPGPKey, len(keys))
+	unknownC := make(chan *model.PeerPGPKey, len(keys))
+
+	var wg sync.WaitGroup
+	wg.Add(len(keys))
+
+	for _, key := range keys {
+		go func(k *model.PeerPGPKey) {
+			s <- struct{}{}
+			defer func() {
+				wg.Done()
+				<-s
+			}()
+
+			glog.Infof("PGP: Processing %v", *k)
+
+			// HACK(q3k)
+			if k.State == "known" {
+				knownC <- k
+				return
+			}
+
+			fp, err := hex.DecodeString(k.Fingerprint)
+			if err != nil {
+				errC <- fmt.Errorf("could not decode fingerprint %q: %v", k.Fingerprint, err)
+				return
+			}
+
+			req := &pb.KeyInfoRequest{
+				Fingerprint: fp,
+				Caching:     pb.KeyInfoRequest_CACHING_FORCE_REMOTE,
+			}
+
+			_, err = p.pgpc.KeyInfo(ctx, req)
+			st, ok := status.FromError(err)
+			switch {
+			case err == nil:
+				knownC <- k
+			case ok && st.Code() == codes.NotFound:
+				unknownC <- k
+			default:
+				errC <- err
+			}
+		}(key)
+	}
+
+	wg.Wait()
+	close(errC)
+	close(knownC)
+	close(unknownC)
+
+	pcr := []*model.PeerCheckResult{}
+
+	positive := []string{}
+	for p := range knownC {
+		positive = append(positive, p.Fingerprint)
+		pcr = append(pcr, &model.PeerCheckResult{
+			PeerASN:   p.PeerASN,
+			CheckName: "pgp",
+			Time:      time.Now(),
+			Status:    model.PeerCheckStatus_Okay,
+		})
+	}
+	negative := []string{}
+	for n := range unknownC {
+		negative = append(negative, n.Fingerprint)
+		pcr = append(pcr, &model.PeerCheckResult{
+			PeerASN:   n.PeerASN,
+			CheckName: "pgp",
+			Time:      time.Now(),
+			Status:    model.PeerCheckStatus_Failed,
+			Message:   fmt.Sprintf("key %q not found on keyservers", n.Fingerprint),
+		})
+	}
+
+	glog.Infof("%v, %v", positive, negative)
+
+	if len(positive) > 0 || len(negative) > 0 {
+		err := m.ValidatePGPKeys(ctx, positive, negative)
+		if err != nil {
+			return fmt.Errorf("ValidatePGPKeys(%v, %v): %v", positive, negative, err)
+		}
+	}
+
+	if len(pcr) > 0 {
+		err = m.SubmitPeerCheckResults(ctx, "pgp", pcr)
+		if err != nil {
+			return err
+		}
+	}
+
+	errs := []error{}
+	for err := range errC {
+		errs = append(errs, err)
+	}
+
+	if len(errs) > 0 {
+		glog.Errorf("Errors while processing keys: %v", errs)
+		return fmt.Errorf("errors occurred while processing keys")
+	}
+
+	return nil
+
+}
diff --git a/bgpwtf/cccampix/verifier/processor_rpki.go b/bgpwtf/cccampix/verifier/processor_rpki.go
index b00aed2..3f16602 100644
--- a/bgpwtf/cccampix/verifier/processor_rpki.go
+++ b/bgpwtf/cccampix/verifier/processor_rpki.go
@@ -29,7 +29,7 @@
 	return "RPKI"
 }
 
-func (p *rpki) NextRun(now time.Time) time.Time {
+func (p *rpki) NextRun(now time.Time, lastFailed bool) time.Time {
 	return now.Add(1 * time.Minute)
 }
 
diff --git a/bgpwtf/cccampix/verifier/processor_secretgen.go b/bgpwtf/cccampix/verifier/processor_secretgen.go
index cefa1cc..2820fa4 100644
--- a/bgpwtf/cccampix/verifier/processor_secretgen.go
+++ b/bgpwtf/cccampix/verifier/processor_secretgen.go
@@ -19,7 +19,7 @@
 	return "SecretGen"
 }
 
-func (p *secretGen) NextRun(now time.Time) time.Time {
+func (p *secretGen) NextRun(now time.Time, lastFailed bool) time.Time {
 	return now.Add(1 * time.Minute)
 }
 
diff --git a/bgpwtf/cccampix/verifier/processors.go b/bgpwtf/cccampix/verifier/processors.go
index ff70e9d..3b88fe8 100644
--- a/bgpwtf/cccampix/verifier/processors.go
+++ b/bgpwtf/cccampix/verifier/processors.go
@@ -9,7 +9,7 @@
 
 type processor interface {
 	Name() string
-	NextRun(time.Time) time.Time
+	NextRun(lastRun time.Time, lastFailed bool) time.Time
 
 	RunAll(ctx context.Context, m model.Model) error
 }
diff --git a/bgpwtf/cccampix/verifier/service.go b/bgpwtf/cccampix/verifier/service.go
new file mode 100644
index 0000000..e00b85f
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/service.go
@@ -0,0 +1,362 @@
+package main
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
+	"github.com/golang/glog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func (s *service) ProcessorStatus(ctx context.Context, req *pb.ProcessorStatusRequest) (*pb.ProcessorStatusResponse, error) {
+	s.processorsMu.RLock()
+	defer s.processorsMu.RUnlock()
+
+	res := &pb.ProcessorStatusResponse{
+		Processors: make([]*pb.ProcessorStatusResponse_Processor, len(s.processors)),
+	}
+
+	i := 0
+	for _, p := range s.processors {
+		res.Processors[i] = &pb.ProcessorStatusResponse_Processor{
+			Name:    p.name,
+			Status:  pb.ProcessorStatusResponse_Processor_STATUS_OK,
+			LastRun: 0,
+			NextRun: 0,
+		}
+
+		if p.lastRun != nil {
+			res.Processors[i].LastRun = p.lastRun.UnixNano()
+			res.Processors[i].NextRun = p.p.NextRun(*p.lastRun, p.lastErr != nil).UnixNano()
+		}
+
+		if p.lastErr != nil {
+			res.Processors[i].Status = pb.ProcessorStatusResponse_Processor_STATUS_ERROR
+		}
+
+		i += 1
+	}
+	return res, nil
+}
+
+func (s *service) PeerSummary(req *pb.PeerSummaryRequest, stream pb.Verifier_PeerSummaryServer) error {
+	peers, err := s.model.GetCheckablePeers(stream.Context())
+	if err != nil {
+		glog.Errorf("model.GetCheckablePeers: %v", err)
+		return status.Error(codes.Unavailable, "model error")
+	}
+
+	asns := make([]int64, len(peers))
+	asnToRes := make(map[int64]*pb.PeerSummaryResponse)
+
+	for i, peer := range peers {
+		routers := make([]*pb.PeeringDBMember_Router, len(peer.Routers))
+		for i, router := range peer.Routers {
+			routers[i] = &pb.PeeringDBMember_Router{}
+			if router.V4 != nil {
+				routers[i].Ipv4 = router.V4.String()
+			}
+			if router.V6 != nil {
+				routers[i].Ipv6 = router.V6.String()
+			}
+		}
+		p := &pb.PeeringDBMember{
+			Asn:     peer.ASN,
+			Name:    peer.Name,
+			Routers: routers,
+		}
+		res := &pb.PeerSummaryResponse{
+			PeeringdbInfo: p,
+			CheckStatus:   pb.PeerSummaryResponse_STATUS_OK,
+		}
+		asnToRes[peer.ASN] = res
+		asns[i] = peer.ASN
+	}
+
+	checkres, err := s.model.GetPeerCheckResults(stream.Context(), asns)
+	if err != nil {
+		glog.Errorf("GetPeerCheckResults(%v): %v", asns, err)
+		for _, res := range asnToRes {
+			res.CheckStatus = pb.PeerSummaryResponse_STATUS_UNKNOWN
+		}
+	} else {
+		passedChecks := make(map[int64]map[string]bool)
+		for _, c := range checkres {
+			if _, ok := passedChecks[c.PeerASN]; !ok {
+				passedChecks[c.PeerASN] = make(map[string]bool)
+			}
+			passedChecks[c.PeerASN][c.CheckName] = c.Status == model.PeerCheckStatus_Okay
+		}
+
+		for asn, checks := range passedChecks {
+			for _, required := range s.requiredChecks {
+				if !checks[required] {
+					asnToRes[asn].CheckStatus = pb.PeerSummaryResponse_STATUS_FAILED
+					break
+				}
+			}
+		}
+	}
+
+	for _, res := range asnToRes {
+		if err := stream.Send(res); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *service) PeerDetails(ctx context.Context, req *pb.PeerDetailsRequest) (*pb.PeerDetailsResponse, error) {
+	if req.Asn <= 0 {
+		return nil, status.Error(codes.InvalidArgument, "asn must be set")
+	}
+
+	res := &pb.PeerDetailsResponse{}
+
+	peeringdb, err := s.model.GetPeeringDBPeer(ctx, req.Asn)
+	if err != nil {
+		glog.Errorf("GetPeeringDBPeer(%v): %v", req.Asn, err)
+		return nil, status.Error(codes.Unavailable, "could not get allowed prefixes")
+	}
+
+	if peeringdb.Asn != req.Asn {
+		return nil, status.Error(codes.NotFound, "no such ASN")
+	}
+
+	res.PeeringdbInfo = peeringdb
+
+	checkres, err := s.model.GetPeerCheckResults(ctx, []int64{req.Asn})
+	if err != nil {
+		glog.Errorf("GetPeerCheckResults(%v): %v", req.Asn, err)
+		return nil, status.Error(codes.Unavailable, "could not get check results")
+	}
+
+	res.Checks = make([]*pb.PeerDetailsResponse_Check, len(checkres))
+	for i, check := range checkres {
+		status := pb.PeerDetailsResponse_Check_STATUS_INVALID
+		switch check.Status {
+		case model.PeerCheckStatus_Okay:
+			status = pb.PeerDetailsResponse_Check_STATUS_OK
+		case model.PeerCheckStatus_SoftFailed:
+			status = pb.PeerDetailsResponse_Check_STATUS_OK
+		case model.PeerCheckStatus_Failed:
+			status = pb.PeerDetailsResponse_Check_STATUS_FAILED
+		}
+		res.Checks[i] = &pb.PeerDetailsResponse_Check{
+			Name:   check.CheckName,
+			Status: status,
+			Time:   check.Time.UnixNano(),
+			Msg:    check.Message,
+		}
+	}
+
+	prefixes, err := s.model.GetAllowedPrefixes(ctx, req.Asn)
+	if err != nil {
+		glog.Errorf("GetAllowedPrefixes(%v): %v", req.Asn, err)
+		return nil, status.Error(codes.Unavailable, "could not get allowed prefixes")
+	}
+
+	res.AllowedPrefixes = make([]*pb.PeerDetailsResponse_AllowedPrefix, len(prefixes))
+	for i, prefix := range prefixes {
+		res.AllowedPrefixes[i] = &pb.PeerDetailsResponse_AllowedPrefix{
+			Prefix:    prefix.Prefix.String(),
+			MaxLength: prefix.MaxLength,
+			Ta:        prefix.TA,
+		}
+	}
+
+	return res, nil
+}
+
+func (s *service) RouterHeartbeat(ctx context.Context, req *pb.RouterHeartbeatRequest) (*pb.RouterHeartbeatResponse, error) {
+	if req.Name == "" {
+		return nil, status.Error(codes.InvalidArgument, "name must be set")
+	}
+
+	pcfgM := make(map[string]*model.PeerConfiguration)
+	pcfgs, err := s.model.GetPeerConfiguration(ctx)
+	if err != nil {
+		glog.Errorf("GetPeerConfiguration: %v", err)
+		return nil, status.Error(codes.Unavailable, "could not get peer configs")
+	}
+
+	for _, pcfg := range pcfgs {
+		ask := fmt.Sprintf("AS%d", pcfg.Peer.ASN)
+		pcfgM[ask] = pcfg
+	}
+
+	peers, err := s.model.GetCheckablePeers(ctx)
+	if err != nil {
+		glog.Errorf("GetCheckablePeers: %v", err)
+		return nil, status.Error(codes.Unavailable, "could not get peers")
+	}
+
+	asconfs := make(map[string]*pb.RouterHeartbeatResponse_ASConfig)
+
+	for _, peer := range peers {
+		as := fmt.Sprintf("AS%d", peer.ASN)
+
+		pcfg, ok := pcfgM[as]
+		if !ok {
+			continue
+		}
+
+		asconfs[as] = &pb.RouterHeartbeatResponse_ASConfig{
+			Asn:      peer.ASN,
+			Routers:  make([]*pb.RouterHeartbeatResponse_ASConfig_Router, len(pcfg.Peer.Routers)),
+			Prefixes: []*pb.RouterHeartbeatResponse_ASConfig_AllowedPrefix{},
+		}
+
+		glog.Infof("%+v", pcfg.Peer.Routers)
+		for i, r := range pcfg.Peer.Routers {
+			ipv6 := ""
+			if r.V6 != nil {
+				ipv6 = r.V6.String()
+			}
+			ipv4 := ""
+			if r.V4 != nil {
+				ipv4 = r.V4.String()
+			}
+			asconfs[as].Routers[i] = &pb.RouterHeartbeatResponse_ASConfig_Router{
+				Ipv6:     ipv6,
+				Ipv4:     ipv4,
+				Password: r.Config.BGPSecret,
+			}
+		}
+
+		prefixes, err := s.model.GetAllowedPrefixes(ctx, peer.ASN)
+		if err != nil {
+			glog.Errorf("GetAllowedPrefixes(_, %d): %v", peer.ASN, err)
+			return nil, status.Error(codes.Unavailable, "could not get peer prefixes")
+		}
+
+		for _, prefix := range prefixes {
+			asconfs[as].Prefixes = append(asconfs[as].Prefixes, &pb.RouterHeartbeatResponse_ASConfig_AllowedPrefix{
+				Prefix:    prefix.Prefix.String(),
+				MaxLength: prefix.MaxLength,
+			})
+		}
+	}
+
+	res := &pb.RouterHeartbeatResponse{
+		AsConfigs: make([]*pb.RouterHeartbeatResponse_ASConfig, len(asconfs)),
+	}
+
+	i := 0
+	for _, asconf := range asconfs {
+		res.AsConfigs[i] = asconf
+		i += 1
+	}
+
+	return res, nil
+}
+
+func (s *service) PeerSecrets(ctx context.Context, req *pb.PeerSecretsRequest) (*pb.PeerSecretsResponse, error) {
+	if req.Asn <= 0 {
+		return nil, status.Error(codes.InvalidArgument, "asn must be set")
+	}
+	pcrs, err := s.model.GetPeerConfiguration(ctx)
+	if err != nil {
+		glog.Errorf("GetPeerConfiguration: %v", err)
+		return nil, status.Error(codes.Unavailable, "error when retrieving peer configs")
+	}
+
+	var pcr *model.PeerConfiguration
+	for _, p := range pcrs {
+		if p.Peer.ASN == req.Asn {
+			pcr = p
+			break
+		}
+	}
+
+	if pcr == nil {
+		return nil, status.Error(codes.NotFound, "no such ASN")
+	}
+
+	plain := fmt.Sprintf(`
+Hello AS %d!
+
+Here are your config settings:
+`, req.Asn)
+
+	for _, router := range pcr.Peer.Routers {
+		if router.V4 != nil {
+			plain += fmt.Sprintf(`
+our addresses: 185.236.243.5 (rs1), 185.236.243.6 (rs2)
+      our asn: 208521
+ your address: %s
+     your asn: %d
+   bgp secret: %s
+`, router.V4.String(), req.Asn, router.Config.BGPSecret)
+		}
+		if router.V6 != nil {
+			plain += fmt.Sprintf(`
+our addresses: 2a0d:eb02:4242:4242::5 (rs1), 2a0d:eb02:4242:4242::6 (rs2)
+      our asn: 208521
+ your address: %s
+     your asn: %d
+   bgp secret: %s
+`, router.V6.String(), req.Asn, router.Config.BGPSecret)
+		}
+	}
+
+	plain += `
+Happy exchanging!
+bgp.wtf (DECT: 4735)
+`
+
+	key, err := s.model.GetPeerPGPKey(ctx, req.Asn)
+	if err != nil {
+		glog.Errorf("GetPeerPGPKey: %v", err)
+		return nil, status.Error(codes.Unavailable, "could not get pgp key")
+	}
+
+	plainB := []byte(plain)
+
+	stream, err := s.pgp.Encrypt(ctx)
+	if err != nil {
+		glog.Errorf("Encrypt: %v", err)
+		return nil, status.Error(codes.Unavailable, "could not encrypt")
+	}
+
+	fingerprint, err := hex.DecodeString(key.Fingerprint)
+	if err != nil {
+		glog.Errorf("Invalid fingerprint %q: %v", key.Fingerprint, err)
+		return nil, status.Error(codes.Unavailable, "could not encrypt")
+	}
+
+	reqE := &pb.EncryptRequest{
+		Data:        plainB,
+		Info:        pb.EncryptRequest_CHUNK_LAST,
+		Fingerprint: fingerprint,
+	}
+
+	if err := stream.Send(reqE); err != nil {
+		glog.Errorf("Encrypt.Send: %v", err)
+		return nil, status.Error(codes.Unavailable, "could not encrypt")
+	}
+	stream.CloseSend()
+
+	cipher := []byte{}
+	for {
+		in, err := stream.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			glog.Errorf("Encrypt.Recv: %v", err)
+			return nil, status.Error(codes.Unavailable, "could not encrypt")
+		}
+		cipher = append(cipher, in.Data...)
+	}
+
+	return &pb.PeerSecretsResponse{
+		PgpData: cipher,
+	}, nil
+}
diff --git a/devtools/prodimage/Dockerfile b/devtools/prodimage/Dockerfile
index cb5e7db..9cdf8a2 100644
--- a/devtools/prodimage/Dockerfile
+++ b/devtools/prodimage/Dockerfile
@@ -11,5 +11,6 @@
         gnupg2 \
         rsync \
         python \
+        gnupg2 \
         python3 ;\
     rm -rf /var/lib/apt/lists