bgpwtf/cccampix: implement verifier SQL model (peers, peer routers, checks, PGP keys, session configs, allowed prefixes)

Change-Id: I49fd5906e69512e8f2d414f406edc0179522f225
diff --git a/bgpwtf/cccampix/verifier/model/BUILD.bazel b/bgpwtf/cccampix/verifier/model/BUILD.bazel
new file mode 100644
index 0000000..bff1eeb
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/BUILD.bazel
@@ -0,0 +1,28 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "allowed_prefixes.go",
+        "checkable_peers.go",
+        "config.go",
+        "get_checks.go",
+        "model.go",
+        "peer_routers.go",
+        "peers.go",
+        "pgp.go",
+        "schema.go",
+        "submit_checks.go",
+    ],
+    importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//bgpwtf/cccampix/proto:go_default_library",
+        "//bgpwtf/cccampix/verifier/model/migrations:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@com_github_golang_migrate_migrate_v4//:go_default_library",
+        "@com_github_golang_migrate_migrate_v4//database/cockroachdb:go_default_library",
+        "@com_github_jmoiron_sqlx//:go_default_library",
+        "@com_github_lib_pq//:go_default_library",
+    ],
+)
diff --git a/bgpwtf/cccampix/verifier/model/allowed_prefixes.go b/bgpwtf/cccampix/verifier/model/allowed_prefixes.go
new file mode 100644
index 0000000..e6b38d0
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/allowed_prefixes.go
@@ -0,0 +1,87 @@
+package model
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"net"
+	"time"
+)
+
+func (s *sqlModel) UpdateAllowedPrefixes(ctx context.Context, asn int64, prefixes []*AllowedPrefix) error {
+	tx := s.db.MustBeginTx(ctx, &sql.TxOptions{})
+	defer tx.Rollback()
+
+	timestamp := time.Now().UnixNano() // generation marker: rows not re-upserted this run keep an older timestamp
+
+	for _, prefix := range prefixes {
+		q := `
+			INSERT INTO allowed_prefixes
+				(peer_id, timestamp, prefix, max_length, ta)
+			SELECT
+				peers.id, :timestamp, :prefix, :max_length, :ta
+			FROM peers
+			WHERE peers.asn = :asn
+			ON CONFLICT (peer_id, prefix)
+			DO UPDATE SET
+				timestamp = :timestamp,
+				max_length = :max_length,
+				ta = :ta
+		`
+		ap := sqlAllowedPrefix{
+			Timestamp: timestamp,
+			Prefix:    prefix.Prefix.String(),
+			MaxLength: prefix.MaxLength,
+			TA:        prefix.TA,
+			ASN:       fmt.Sprintf("%d", asn), // peers.asn is a STRING column, hence the stringification
+		}
+
+		if _, err := tx.NamedExecContext(ctx, q, ap); err != nil {
+			return fmt.Errorf("INSERT allowed_prefixes: %v", err)
+		}
+	}
+
+	q := `
+		DELETE FROM allowed_prefixes
+		WHERE timestamp != $1
+		  AND peer_id = (SELECT peers.id FROM peers WHERE peers.asn = $2)
+	`
+	if _, err := tx.ExecContext(ctx, q, timestamp, asn); err != nil { // sweep stale rows; NOTE(review): $2 binds the int64 asn against STRING peers.asn, unlike the stringified :asn above — confirm
+		return fmt.Errorf("DELETE FROM allowed_prefixes: %v", err)
+	}
+
+	return tx.Commit()
+}
+
+func (s *sqlModel) GetAllowedPrefixes(ctx context.Context, asn int64) ([]*AllowedPrefix, error) {
+	q := `
+		SELECT
+			allowed_prefixes.prefix,
+			allowed_prefixes.max_length,
+			allowed_prefixes.ta
+		FROM
+			allowed_prefixes
+		LEFT JOIN peers
+		ON peers.id = allowed_prefixes.peer_id
+		WHERE peers.asn = $1
+	`
+	data := []sqlAllowedPrefix{}
+	if err := s.db.SelectContext(ctx, &data, q, asn); err != nil { // NOTE(review): int64 bound against STRING peers.asn; the write path stringifies first — confirm the driver/DB coerces
+		return nil, fmt.Errorf("SELECT allowed_prefixes: %v", err)
+	}
+
+	res := make([]*AllowedPrefix, len(data))
+	for i, d := range data {
+		_, prefix, err := net.ParseCIDR(d.Prefix)
+		if err != nil {
+			return nil, fmt.Errorf("corrupted CIDR in database: %v", err)
+		}
+		res[i] = &AllowedPrefix{
+			Prefix:    *prefix,
+			MaxLength: d.MaxLength,
+			TA:        d.TA,
+		}
+	}
+
+	return res, nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/checkable_peers.go b/bgpwtf/cccampix/verifier/model/checkable_peers.go
new file mode 100644
index 0000000..2fe133f
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/checkable_peers.go
@@ -0,0 +1,76 @@
+package model
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+)
+
+func (m *sqlModel) GetCheckablePeers(ctx context.Context) ([]*Peer, error) {
+
+	data := []struct {
+		sqlPeer       `db:"peers"`
+		sqlPeerRouter `db:"peer_routers"`
+	}{}
+	q := `
+		SELECT
+			peers.id "peers.id",
+			peers.asn "peers.asn",
+			peers.name "peers.name",
+
+			peer_routers.peer_id "peer_routers.peer_id",
+			peer_routers.v6 "peer_routers.v6",
+			peer_routers.v4 "peer_routers.v4"
+		FROM peers
+		LEFT JOIN peer_routers
+		ON peer_routers.peer_id = peers.id
+	`
+	if err := m.db.SelectContext(ctx, &data, q); err != nil {
+		return nil, fmt.Errorf("SELECT peers/peerRouters: %v", err)
+	}
+
+	// Collapse peers into map
+	// ID -> Peer
+	peers := make(map[string]*Peer)
+
+	for _, row := range data {
+		peer, ok := peers[row.sqlPeer.ID]
+		if !ok {
+			asn, err := strconv.ParseInt(row.sqlPeer.ASN, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("data corruption: invalid ASN %q", row.sqlPeer.ASN)
+			}
+			peer = &Peer{
+				ASN:     asn,
+				Name:    row.sqlPeer.Name,
+				Routers: []*Router{},
+			}
+			peers[row.sqlPeer.ID] = peer
+		}
+
+		var v6 net.IP
+		var v4 net.IP
+
+		if row.sqlPeerRouter.V6.Valid {
+			v6 = net.ParseIP(row.sqlPeerRouter.V6.String)
+		}
+		if row.sqlPeerRouter.V4.Valid {
+			v4 = net.ParseIP(row.sqlPeerRouter.V4.String)
+		}
+
+		peer.Routers = append(peer.Routers, &Router{ // NOTE(review): a peer with zero routers still gets one all-nil Router entry via the LEFT JOIN — confirm intended
+			V6: v6,
+			V4: v4,
+		})
+	}
+
+	res := make([]*Peer, len(peers))
+	i := 0
+	for _, peer := range peers {
+		res[i] = peer
+		i += 1
+	}
+
+	return res, nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/config.go b/bgpwtf/cccampix/verifier/model/config.go
new file mode 100644
index 0000000..bafd46f
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/config.go
@@ -0,0 +1,51 @@
+package model
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+)
+
+func (m *sqlModel) ConfigureMissingSessions(ctx context.Context, gen func() SessionConfig) error {
+	tx := m.db.MustBeginTx(ctx, &sql.TxOptions{})
+	defer tx.Rollback()
+
+	q := `
+		SELECT
+			peer_routers.peer_id "peer_id",
+			peer_routers.id "id"
+		FROM peer_routers
+		WHERE peer_routers.id NOT IN (
+			SELECT session_configs.peer_router_id
+			FROM session_configs
+		)
+	`
+	missing := []struct {
+		PeerID string `db:"peer_id"`
+		ID     string `db:"id"`
+	}{}
+	if err := tx.SelectContext(ctx, &missing, q); err != nil { // read via tx so the snapshot matches the inserts below
+		return fmt.Errorf("SELECT peerRouters: %v", err)
+	}
+
+	for _, router := range missing { // renamed from m, which shadowed the method receiver
+		config := gen()
+		q = `
+			INSERT INTO
+				session_configs
+				(peer_id, peer_router_id, bgp_secret)
+			VALUES
+				(:peer_id, :peer_router_id, :bgp_secret)
+		`
+		data := sqlSessionConfig{
+			PeerID:       router.PeerID,
+			PeerRouterID: router.ID,
+			BGPSecret:    config.BGPSecret,
+		}
+		if _, err := tx.NamedExecContext(ctx, q, data); err != nil {
+			return fmt.Errorf("INSERT session_configs: %v", err)
+		}
+	}
+
+	return tx.Commit()
+}
diff --git a/bgpwtf/cccampix/verifier/model/get_checks.go b/bgpwtf/cccampix/verifier/model/get_checks.go
new file mode 100644
index 0000000..0879427
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/get_checks.go
@@ -0,0 +1,70 @@
+package model
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/golang/glog"
+	"github.com/jmoiron/sqlx"
+)
+
+func (s *sqlModel) GetPeerCheckResults(ctx context.Context, asn []int64) ([]*PeerCheckResult, error) {
+	asns := make([]string, len(asn))
+	for i, asn := range asn {
+		asns[i] = fmt.Sprintf("%d", asn)
+	}
+
+	data := []struct {
+		sqlPeer      `db:"peers"`
+		sqlPeerCheck `db:"peer_checks"`
+	}{}
+	q := `
+		SELECT
+			peers.asn                 "peers.asn",
+			peer_checks.check_name    "peer_checks.check_name",
+			peer_checks.check_time    "peer_checks.check_time",
+			peer_checks.check_status  "peer_checks.check_status",
+			peer_checks.check_message "peer_checks.check_message"
+		FROM peers
+		LEFT JOIN peer_checks
+		ON peers.id = peer_checks.peer_id
+		WHERE peers.asn IN (?)
+	`
+	query, args, err := sqlx.In(q, asns) // NOTE(review): sqlx.In returns an error for an empty slice — confirm callers never pass zero ASNs
+	if err != nil {
+		return nil, fmt.Errorf("SELECT peers: %v", err)
+	}
+
+	query = s.db.Rebind(query)
+	if err := s.db.SelectContext(ctx, &data, query, args...); err != nil { // NOTE(review): LEFT JOIN yields NULL check columns for peers without checks, which cannot scan into sqlPeerCheck's non-nullable fields — confirm
+		return nil, fmt.Errorf("SELECT peers: %v", err)
+	}
+
+	res := make([]*PeerCheckResult, len(data))
+	for i, d := range data {
+		asn, err := strconv.ParseInt(d.sqlPeer.ASN, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		status := PeerCheckStatus_Invalid
+		switch d.sqlPeerCheck.CheckStatus {
+		case "okay":
+			status = PeerCheckStatus_Okay
+		case "failed":
+			status = PeerCheckStatus_Failed
+		default:
+			glog.Errorf("Unhandled check status %q", d.sqlPeerCheck.CheckStatus)
+		}
+		res[i] = &PeerCheckResult{
+			PeerASN:   asn,
+			CheckName: d.sqlPeerCheck.CheckName,
+			Time:      time.Unix(0, d.sqlPeerCheck.CheckTime),
+			Status:    status,
+			Message:   d.sqlPeerCheck.CheckMessage,
+		}
+	}
+
+	return res, nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1564664855_initial.down.sql b/bgpwtf/cccampix/verifier/model/migrations/1564664855_initial.down.sql
new file mode 100644
index 0000000..d0ba745
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1564664855_initial.down.sql
@@ -0,0 +1,2 @@
+DROP TABLE peer_routers;
+DROP TABLE peers;
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1564664855_initial.up.sql b/bgpwtf/cccampix/verifier/model/migrations/1564664855_initial.up.sql
new file mode 100644
index 0000000..bab0c35
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1564664855_initial.up.sql
@@ -0,0 +1,27 @@
+CREATE TABLE peers (
+        id  UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+        asn STRING NOT NULL,
+        name STRING NOT NULL,
+        source STRING check (
+            source = 'from-peeringdb' or
+            source = 'manual'
+        ) NOT NULL,
+        UNIQUE (asn)
+);
+
+CREATE TABLE peer_routers (
+        peer_id UUID NOT NULL,
+        id UUID DEFAULT gen_random_uuid(),
+
+        v6 STRING,
+        v4 STRING,
+        source STRING check (
+            source = 'from-peeringdb' or
+            source = 'manual'
+        ) NOT NULL,
+
+        UNIQUE (v4),
+        UNIQUE (v6),
+        PRIMARY KEY (peer_id, id),
+        CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
+) INTERLEAVE IN PARENT peers (peer_id);
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1564751360_membership.down.sql b/bgpwtf/cccampix/verifier/model/migrations/1564751360_membership.down.sql
new file mode 100644
index 0000000..c47fec6
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1564751360_membership.down.sql
@@ -0,0 +1 @@
+DROP TABLE peer_checks;
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1564751360_membership.up.sql b/bgpwtf/cccampix/verifier/model/migrations/1564751360_membership.up.sql
new file mode 100644
index 0000000..247c9eb
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1564751360_membership.up.sql
@@ -0,0 +1,18 @@
+CREATE TABLE peer_checks (
+    peer_id UUID NOT NULL,
+    id UUID DEFAULT gen_random_uuid(),
+
+    check_name STRING NOT NULL,
+    check_time INT NOT NULL,
+    check_status STRING check (
+        check_status = 'unknown' or
+        check_status = 'okay' or
+        check_status = 'failed'
+    ) NOT NULL,
+    check_message STRING NOT NULL,
+    delete BOOL NOT NULL,
+
+    UNIQUE(peer_id, check_name),
+    PRIMARY KEY (peer_id, id),
+    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
+) INTERLEAVE IN PARENT peers (peer_id);
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1564867563_pgp.down.sql b/bgpwtf/cccampix/verifier/model/migrations/1564867563_pgp.down.sql
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1564867563_pgp.down.sql
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1564867563_pgp.up.sql b/bgpwtf/cccampix/verifier/model/migrations/1564867563_pgp.up.sql
new file mode 100644
index 0000000..6c6f752
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1564867563_pgp.up.sql
@@ -0,0 +1,11 @@
+CREATE TABLE peer_pgp_keys (
+    peer_id UUID NOT NULL,
+    id UUID DEFAULT gen_random_uuid(),
+
+    fingerprint STRING NOT NULL,
+    time_created INT NOT NULL,
+
+    UNIQUE (peer_id),
+    PRIMARY KEY (peer_id, id),
+    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
+) INTERLEAVE IN PARENT peers (peer_id);
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1565471293_session_configs.down.sql b/bgpwtf/cccampix/verifier/model/migrations/1565471293_session_configs.down.sql
new file mode 100644
index 0000000..f286d79
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1565471293_session_configs.down.sql
@@ -0,0 +1 @@
+DROP TABLE session_configs;
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1565471293_session_configs.up.sql b/bgpwtf/cccampix/verifier/model/migrations/1565471293_session_configs.up.sql
new file mode 100644
index 0000000..7a1d07b
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1565471293_session_configs.up.sql
@@ -0,0 +1,11 @@
+CREATE TABLE session_configs (
+    peer_id UUID NOT NULL,
+    peer_router_id UUID NOT NULL,
+    id UUID DEFAULT gen_random_uuid(),
+
+    bgp_secret STRING NOT NULL,
+
+    UNIQUE (peer_router_id),
+    PRIMARY KEY (peer_id, peer_router_id, id),
+    CONSTRAINT fk_peer_router FOREIGN KEY (peer_id, peer_router_id) REFERENCES peer_routers ON DELETE CASCADE
+) INTERLEAVE IN PARENT peer_routers (peer_id, peer_router_id);
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1565545845_allowed_prefixes.down.sql b/bgpwtf/cccampix/verifier/model/migrations/1565545845_allowed_prefixes.down.sql
new file mode 100644
index 0000000..d0a98f3
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1565545845_allowed_prefixes.down.sql
@@ -0,0 +1 @@
+DROP TABLE allowed_prefixes;
diff --git a/bgpwtf/cccampix/verifier/model/migrations/1565545845_allowed_prefixes.up.sql b/bgpwtf/cccampix/verifier/model/migrations/1565545845_allowed_prefixes.up.sql
new file mode 100644
index 0000000..c033e3d
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/1565545845_allowed_prefixes.up.sql
@@ -0,0 +1,13 @@
+CREATE TABLE allowed_prefixes (
+    peer_id UUID NOT NULL,
+    id UUID DEFAULT gen_random_uuid(),
+    timestamp INT NOT NULL,
+
+    prefix STRING NOT NULL,
+    max_length INT NOT NULL,
+    ta STRING NOT NULL,
+
+    PRIMARY KEY (peer_id, id),
+    UNIQUE (peer_id, prefix),
+    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
+) INTERLEAVE IN PARENT peers (peer_id);
diff --git a/bgpwtf/cccampix/verifier/model/migrations/BUILD.bazel b/bgpwtf/cccampix/verifier/model/migrations/BUILD.bazel
new file mode 100644
index 0000000..b20d35f
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//extras:embed_data.bzl", "go_embed_data")
+
+go_embed_data(
+    name = "migrations_data",
+    srcs = glob(["*.sql"]),
+    package = "migrations",
+    flatten = True,
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "migrations.go",
+        ":migrations_data",  # keep
+    ],
+    importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model/migrations",
+    visibility = ["//bgpwtf/cccampix/verifier/model:__subpackages__"],
+    deps = [
+        "//go/mirko:go_default_library",
+        "@com_github_golang_migrate_migrate_v4//:go_default_library",
+    ],
+)
diff --git a/bgpwtf/cccampix/verifier/model/migrations/migrations.go b/bgpwtf/cccampix/verifier/model/migrations/migrations.go
new file mode 100644
index 0000000..1782c2e
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/migrations/migrations.go
@@ -0,0 +1,17 @@
+package migrations
+
+import (
+	"fmt"
+
+	"code.hackerspace.pl/hscloud/go/mirko"
+
+	"github.com/golang-migrate/migrate/v4"
+)
+
+func New(dburl string) (*migrate.Migrate, error) {
+	source, err := mirko.NewMigrationsFromBazel(Data)
+	if err != nil {
+		return nil, fmt.Errorf("could not create migrations: %v", err)
+	}
+	return migrate.NewWithSourceInstance("bazel", source, dburl)
+}
diff --git a/bgpwtf/cccampix/verifier/model/model.go b/bgpwtf/cccampix/verifier/model/model.go
new file mode 100644
index 0000000..b9b81c9
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/model.go
@@ -0,0 +1,141 @@
+package model
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+	"time"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model/migrations"
+	migrate "github.com/golang-migrate/migrate/v4"
+	_ "github.com/golang-migrate/migrate/v4/database/cockroachdb"
+	"github.com/jmoiron/sqlx"
+	_ "github.com/lib/pq"
+)
+
+type Model interface {
+	MigrateUp() error
+
+	RecordPeeringDBPeers(ctx context.Context, members []*pb.PeeringDBMember) error
+	RecordPeeringDBPeerRouters(ctx context.Context, members []*pb.PeeringDBMember) error
+	GetPeeringDBPeer(ctx context.Context, asn int64) (*pb.PeeringDBMember, error)
+
+	GetCheckablePeers(ctx context.Context) ([]*Peer, error)
+	SubmitPeerCheckResults(ctx context.Context, res []*PeerCheckResult) error
+	GetPeerCheckResults(ctx context.Context, asn []int64) ([]*PeerCheckResult, error)
+
+	UpdatePGPKey(ctx context.Context, key *PeerPGPKey) error
+
+	ConfigureMissingSessions(ctx context.Context, gen func() SessionConfig) error
+
+	UpdateAllowedPrefixes(ctx context.Context, asn int64, prefixes []*AllowedPrefix) error
+	GetAllowedPrefixes(ctx context.Context, asn int64) ([]*AllowedPrefix, error)
+}
+
+type stringer struct {
+}
+
+func (s *stringer) String() string {
+	if s == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *s) // NOTE(review): *s is the empty embedded stringer, not the outer struct, so embedders' String() always prints "{}" — they likely need their own String methods
+}
+
+type Router struct {
+	stringer
+	V6 net.IP
+	V4 net.IP
+}
+
+type Peer struct {
+	stringer
+	ASN     int64
+	Name    string
+	Routers []*Router
+}
+
+type PeerCheckStatus int
+
+const (
+	PeerCheckStatus_Invalid PeerCheckStatus = iota
+	PeerCheckStatus_Okay
+	PeerCheckStatus_Failed
+	PeerCheckStatus_SoftFailed
+)
+
+type PeerCheckResult struct {
+	PeerASN   int64
+	CheckName string
+	Time      time.Time
+	Status    PeerCheckStatus
+	Message   string
+}
+
+func (p *PeerCheckResult) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *p)
+}
+
+type PeerPGPKey struct {
+	stringer
+	PeerASN     int64
+	Fingerprint string
+}
+
+type SessionConfig struct {
+	BGPSecret string
+}
+
+type AllowedPrefix struct {
+	Prefix    net.IPNet
+	MaxLength int64
+	TA        string
+}
+
+func (p *AllowedPrefix) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%+v", *p)
+}
+
+type sqlModel struct {
+	db  *sqlx.DB
+	dsn string
+}
+
+func Connect(ctx context.Context, driver, dsn string) (Model, error) {
+	if dsn == "" {
+		return nil, fmt.Errorf("dsn cannot be empty")
+	}
+
+	db, err := sqlx.ConnectContext(ctx, driver, dsn)
+	if err != nil {
+		return nil, fmt.Errorf("could not connect to database: %v", err)
+	}
+
+	return &sqlModel{
+		db:  db,
+		dsn: dsn,
+	}, nil
+}
+
+func (m *sqlModel) MigrateUp() error {
+	dsn := "cockroach://" + strings.TrimPrefix(m.dsn, "postgres://")
+	mig, err := migrations.New(dsn)
+	if err != nil {
+		return err
+	}
+	err = mig.Up()
+	switch err {
+	case migrate.ErrNoChange:
+		return nil
+	default:
+		return err
+	}
+}
diff --git a/bgpwtf/cccampix/verifier/model/peer_routers.go b/bgpwtf/cccampix/verifier/model/peer_routers.go
new file mode 100644
index 0000000..853b9e5
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/peer_routers.go
@@ -0,0 +1,128 @@
+package model
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"github.com/golang/glog"
+	"github.com/jmoiron/sqlx"
+)
+
+func (m *sqlModel) RecordPeeringDBPeerRouters(ctx context.Context, members []*pb.PeeringDBMember) error {
+	tx := m.db.MustBeginTx(ctx, &sql.TxOptions{})
+	defer tx.Rollback()
+
+	for _, member := range members {
+		// Get existing routers for peer.
+		q := `
+			SELECT peer_routers.id, peer_routers.v4, peer_routers.v6
+			FROM peer_routers
+			LEFT JOIN peers ON (peer_routers.peer_id = peers.id)
+			WHERE peer_routers.source = 'from-peeringdb'
+			  AND peers.asn = ?
+		`
+		q = tx.Rebind(q)
+		existing := []sqlPeerRouter{}
+		if err := tx.SelectContext(ctx, &existing, q, fmt.Sprintf("%d", member.Asn)); err != nil {
+			return fmt.Errorf("SELECT peerRouters: %v", err)
+		}
+
+		// Find all routers that need to be deleted because they're outdated.
+		// We do not attempt updates, only removals/recreations.
+
+		// UUID -> bool
+		toDelete := make(map[string]bool)
+
+		for _, ex := range existing {
+			// Try to find a requested router with same IP addresses.
+			found := false
+			for _, router := range member.Routers {
+				if router.Ipv4 == ex.V4.String && router.Ipv6 == ex.V6.String {
+					found = true
+					break
+				}
+			}
+
+			// Not found, mark for deletion.
+			if !found {
+				toDelete[ex.ID] = true
+			}
+		}
+
+		// Find all routers that need to be created.
+		toAdd := []sqlPeerRouter{}
+		for _, router := range member.Routers {
+			// Try to find an existing router with same IP addresses.
+			found := false
+			for _, ex := range existing {
+				if router.Ipv4 == ex.V4.String && router.Ipv6 == ex.V6.String {
+					found = true
+					break
+				}
+			}
+			// Not found, mark for creation.
+			if !found {
+				ta := sqlPeerRouter{
+					Source: "from-peeringdb",
+					ASN:    fmt.Sprintf("%d", member.Asn),
+				}
+				if router.Ipv6 != "" {
+					ta.V6.String = router.Ipv6
+					ta.V6.Valid = true
+				}
+				if router.Ipv4 != "" {
+					ta.V4.String = router.Ipv4
+					ta.V4.Valid = true
+				}
+				toAdd = append(toAdd, ta)
+			}
+		}
+
+		if len(toDelete) > 0 {
+			glog.Infof("RecordPeeringDBPeerRouters: deleting %v", toDelete)
+		}
+		if len(toAdd) > 0 {
+			glog.Infof("RecordPeeringDBPeerRouters: adding %+v", toAdd)
+		}
+
+		// Delete any routers, if needed.
+		if len(toDelete) > 0 {
+			// Get list of IDs to delete.
+			deleteIds := make([]string, len(toDelete))
+			i := 0
+			for id := range toDelete {
+				deleteIds[i] = id
+				i++
+			}
+			query, args, err := sqlx.In("DELETE FROM peer_routers WHERE id IN (?)", deleteIds)
+			if err != nil {
+				return fmt.Errorf("DELETE peerRouters: %v", err)
+			}
+			query = tx.Rebind(query)
+			_, err = tx.ExecContext(ctx, query, args...)
+			if err != nil {
+				return fmt.Errorf("DELETE peerRouters: %v", err)
+			}
+		}
+
+		// Add any routers, if needed.
+		for _, ta := range toAdd {
+			q := `
+				INSERT INTO peer_routers
+					(peer_id, v6, v4, source)
+				SELECT
+					peers.id, :v6, :v4, :source
+				FROM
+					peers
+				WHERE peers.asn = :asn
+			`
+			if _, err := tx.NamedExecContext(ctx, q, ta); err != nil {
+				return fmt.Errorf("INSERT peerRouters: %v", err)
+			}
+		}
+	}
+
+	return tx.Commit()
+}
diff --git a/bgpwtf/cccampix/verifier/model/peers.go b/bgpwtf/cccampix/verifier/model/peers.go
new file mode 100644
index 0000000..1dc0e8f
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/peers.go
@@ -0,0 +1,199 @@
+package model
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strconv"
+	"strings"
+
+	pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
+	"github.com/golang/glog"
+	"github.com/jmoiron/sqlx"
+)
+
+func (m *sqlModel) RecordPeeringDBPeers(ctx context.Context, members []*pb.PeeringDBMember) error {
+	tx := m.db.MustBeginTx(ctx, &sql.TxOptions{})
+	defer tx.Rollback()
+
+	wanted := make(map[string]*pb.PeeringDBMember)
+	for _, member := range members {
+		wanted[fmt.Sprintf("%d", member.Asn)] = member
+	}
+
+	toDelete := make(map[string]bool)
+	toAdd := make(map[string]bool)
+	toUpdate := make(map[string]bool)
+
+	existing := []sqlPeer{}
+	existingMap := make(map[string]*sqlPeer)
+
+	q := `
+		SELECT peers.id, peers.asn, peers.name, peers.source
+		FROM peers
+	`
+	if err := tx.SelectContext(ctx, &existing, q); err != nil {
+		return fmt.Errorf("SELECT peers: %v", err)
+	}
+
+	// Mark ASs to delete and note existing ASs
+	for _, ex := range existing {
+		ex := ex
+		if wanted[ex.ASN] == nil && ex.Source == "from-peeringdb" {
+			toDelete[ex.ASN] = true
+		}
+		existingMap[ex.ASN] = &ex
+	}
+
+	// Mark ASs to add
+	for k := range wanted {
+		if existingMap[k] == nil {
+			toAdd[k] = true
+		}
+	}
+
+	// Mark ASs to update
+	for k, wd := range wanted {
+		if existingMap[k] == nil {
+			continue
+		}
+		if existingMap[k].Source != "from-peeringdb" {
+			continue
+		}
+		if wd.Name != existingMap[k].Name {
+			toUpdate[k] = true
+			continue
+		}
+	}
+
+	if len(toAdd) > 0 {
+		glog.Infof("RecordPeeringDBPeers: adding %v", toAdd)
+	}
+	if len(toDelete) > 0 {
+		glog.Infof("RecordPeeringDBPeers: deleting %v", toDelete)
+	}
+	if len(toUpdate) > 0 {
+		glog.Infof("RecordPeeringDBPeers: updating %v", toUpdate)
+	}
+
+	// Run INSERT to add new ASNs
+	if len(toAdd) > 0 {
+		q = `
+			INSERT INTO peers
+				(asn, name, source)
+			VALUES
+				(:asn, :name, :source)
+		`
+
+		add := make([]*sqlPeer, len(toAdd))
+		i := 0
+		for ta := range toAdd {
+			add[i] = &sqlPeer{
+				ASN:    ta,
+				Name:   wanted[ta].Name,
+				Source: "from-peeringdb",
+			}
+			i++
+		}
+
+		if _, err := tx.NamedExecContext(ctx, q, add); err != nil {
+			return fmt.Errorf("INSERT peers: %v", err)
+		}
+	}
+
+	// Run DELETE to remove nonexistent ASNs
+	if len(toDelete) > 0 {
+		deleteIds := make([]string, len(toDelete))
+		i := 0
+		for td := range toDelete {
+			deleteIds[i] = existingMap[td].ID
+			i++
+		}
+		query, args, err := sqlx.In("DELETE FROM peers WHERE id IN (?)", deleteIds)
+		if err != nil {
+			return fmt.Errorf("DELETE peers: %v", err)
+		}
+		query = tx.Rebind(query)
+		_, err = tx.ExecContext(ctx, query, args...)
+		if err != nil {
+			return fmt.Errorf("DELETE peers: %v", err)
+		}
+	}
+
+	// Run UPDATE to update existing ASNs
+	for k := range toUpdate {
+		want := wanted[k]
+		got := existingMap[k]
+
+		fields := []string{}
+		args := []interface{}{}
+		if want.Name != got.Name {
+			fields = append(fields, "name = ?")
+			args = append(args, want.Name)
+		}
+
+		q = fmt.Sprintf(`
+			UPDATE peers
+			SET
+				%s
+			WHERE
+				id = ?
+		`, strings.Join(fields, ",\n"))
+		q = tx.Rebind(q)
+		args = append(args, got.ID)
+		_, err := tx.ExecContext(ctx, q, args...)
+		if err != nil {
+			return fmt.Errorf("UPDATE peers: %v", err)
+		}
+	}
+
+	return tx.Commit()
+}
+
+func (s *sqlModel) GetPeeringDBPeer(ctx context.Context, asn int64) (*pb.PeeringDBMember, error) {
+	data := []struct {
+		sqlPeer       `db:"peers"`
+		sqlPeerRouter `db:"peer_routers"`
+	}{}
+	q := `
+		SELECT
+			peers.id "peers.id",
+			peers.asn "peers.asn",
+			peers.name "peers.name",
+
+			peer_routers.peer_id "peer_routers.peer_id",
+			peer_routers.v6 "peer_routers.v6",
+			peer_routers.v4 "peer_routers.v4"
+		FROM peers
+		LEFT JOIN peer_routers
+		ON peer_routers.peer_id = peers.id
+		WHERE peers.asn = $1
+	`
+	if err := s.db.SelectContext(ctx, &data, q, asn); err != nil { // NOTE(review): int64 bound against STRING peers.asn; the write path stringifies first — confirm the driver/DB coerces
+		return nil, fmt.Errorf("SELECT peers/peerRouters: %v", err)
+	}
+
+	res := &pb.PeeringDBMember{}
+
+	for i, row := range data {
+		if res.Routers == nil {
+			asn, err := strconv.ParseInt(row.sqlPeer.ASN, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("data corruption: invalid ASN %q", row.sqlPeer.ASN)
+			}
+			res.Asn = asn
+			res.Name = row.sqlPeer.Name
+			res.Routers = make([]*pb.PeeringDBMember_Router, len(data))
+		}
+
+		res.Routers[i] = &pb.PeeringDBMember_Router{}
+		if row.sqlPeerRouter.V6.Valid {
+			res.Routers[i].Ipv6 = row.sqlPeerRouter.V6.String
+		}
+		if row.sqlPeerRouter.V4.Valid {
+			res.Routers[i].Ipv4 = row.sqlPeerRouter.V4.String
+		}
+	}
+
+	return res, nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/pgp.go b/bgpwtf/cccampix/verifier/model/pgp.go
new file mode 100644
index 0000000..a76186e
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/pgp.go
@@ -0,0 +1,31 @@
+package model
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+func (s *sqlModel) UpdatePGPKey(ctx context.Context, key *PeerPGPKey) error {
+	q := `
+		INSERT INTO peer_pgp_keys
+			(peer_id, fingerprint, time_created)
+		SELECT
+			peers.id, :fingerprint, :time_created
+		FROM peers
+		WHERE peers.asn = :asn
+		ON CONFLICT (peer_id)
+		DO UPDATE SET
+			fingerprint = :fingerprint,
+			time_created = :time_created
+	`
+	data := &sqlPeerPGPKey{
+		Fingerprint: key.Fingerprint,
+		ASN:         fmt.Sprintf("%d", key.PeerASN),
+		TimeCreated: time.Now().UnixNano(), // NOTE(review): refreshed on every upsert, so this is effectively "last updated", not creation time — confirm intent
+	}
+	if _, err := s.db.NamedExecContext(ctx, q, data); err != nil {
+		return fmt.Errorf("INSERT peer_pgp_keys: %v", err)
+	}
+	return nil
+}
diff --git a/bgpwtf/cccampix/verifier/model/schema.go b/bgpwtf/cccampix/verifier/model/schema.go
new file mode 100644
index 0000000..093ecc8
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/schema.go
@@ -0,0 +1,65 @@
+package model
+
+import "database/sql"
+
+type sqlPeer struct {
+	ID     string `db:"id"`
+	ASN    string `db:"asn"`
+	Name   string `db:"name"`
+	Source string `db:"source"`
+}
+
+type sqlPeerRouter struct {
+	ID     string         `db:"id"`
+	PeerID string         `db:"peer_id"`
+	V6     sql.NullString `db:"v6"`
+	V4     sql.NullString `db:"v4"`
+	Source string         `db:"source"`
+
+	// Fake, used by app logic.
+	ASN string `db:"asn"`
+}
+
+type sqlPeerCheck struct {
+	ID           string `db:"id"`
+	PeerID       string `db:"peer_id"`
+	CheckName    string `db:"check_name"`
+	CheckTime    int64  `db:"check_time"`
+	CheckStatus  string `db:"check_status"`
+	CheckMessage string `db:"check_message"`
+	Delete       bool   `db:"delete"`
+
+	// Fake, used by app logic.
+	ASN string `db:"asn"`
+}
+
+type sqlPeerPGPKey struct {
+	ID          string `db:"id"`
+	PeerID      string `db:"peer_id"`
+	Fingerprint string `db:"fingerprint"`
+	TimeCreated int64  `db:"time_created"`
+
+	// Fake, used by app logic.
+	ASN string `db:"asn"`
+}
+
+type sqlSessionConfig struct {
+	ID           string `db:"id"`
+	PeerID       string `db:"peer_id"`
+	PeerRouterID string `db:"peer_router_id"`
+
+	BGPSecret string `db:"bgp_secret"`
+}
+
+type sqlAllowedPrefix struct {
+	ID        string `db:"id"`
+	PeerID    string `db:"peer_id"`
+	Timestamp int64  `db:"timestamp"`
+
+	Prefix    string `db:"prefix"`
+	MaxLength int64  `db:"max_length"`
+	TA        string `db:"ta"`
+
+	// Fake, used by app logic.
+	ASN string `db:"asn"`
+}
diff --git a/bgpwtf/cccampix/verifier/model/submit_checks.go b/bgpwtf/cccampix/verifier/model/submit_checks.go
new file mode 100644
index 0000000..79e2a84
--- /dev/null
+++ b/bgpwtf/cccampix/verifier/model/submit_checks.go
@@ -0,0 +1,73 @@
+package model
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+
+	"github.com/golang/glog"
+)
+
+func (s *sqlModel) SubmitPeerCheckResults(ctx context.Context, res []*PeerCheckResult) error {
+	tx := s.db.MustBeginTx(ctx, &sql.TxOptions{})
+	defer tx.Rollback()
+
+	q := `
+		UPDATE peer_checks
+		SET delete = true
+	`
+	if _, err := tx.ExecContext(ctx, q); err != nil { // mark-and-sweep: anything not re-submitted below is removed at the end
+		return fmt.Errorf("UPDATE for deletion peer_checks: %v", err)
+	}
+
+	seenASNs := make(map[int64]bool) // NOTE(review): populated but never read — was the final DELETE meant to be scoped to these ASNs?
+
+	for _, pcr := range res {
+		seenASNs[pcr.PeerASN] = true
+
+		q = `
+			INSERT INTO peer_checks
+				(peer_id, check_name, check_time, check_status, check_message, delete)
+			SELECT
+				peers.id, :check_name, :check_time, :check_status, :check_message, false
+			FROM peers
+			WHERE peers.asn = :asn
+			ON CONFLICT (peer_id, check_name)
+			DO UPDATE SET
+				check_time = :check_time,
+				check_status = :check_status,
+				check_message = :check_message,
+				delete = false
+		`
+		status := "unknown" // was "uknown", which violates the peer_checks check_status CHECK constraint
+		switch pcr.Status {
+		case PeerCheckStatus_Okay:
+			status = "okay"
+		case PeerCheckStatus_Failed:
+			status = "failed"
+		case PeerCheckStatus_SoftFailed:
+			glog.Infof("Skipping soft failure: %+v", pcr)
+			continue
+		}
+		cr := sqlPeerCheck{
+			CheckName:    pcr.CheckName,
+			CheckTime:    pcr.Time.UnixNano(),
+			CheckStatus:  status,
+			CheckMessage: pcr.Message,
+			ASN:          fmt.Sprintf("%d", pcr.PeerASN),
+		}
+		if _, err := tx.NamedExecContext(ctx, q, cr); err != nil {
+			return fmt.Errorf("INSERT peer_checks: %v", err)
+		}
+	}
+
+	q = `
+		DELETE FROM peer_checks
+		WHERE delete = true
+	`
+	if _, err := tx.ExecContext(ctx, q); err != nil {
+		return fmt.Errorf("DELETE FROM peer_checks: %v", err)
+	}
+
+	return tx.Commit()
+}