Merge "games/factorio: pymods: bump up resources again"
diff --git a/WORKSPACE b/WORKSPACE
index 9370030..02b24e6 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -18,11 +18,14 @@
 
 protobuf_deps()
 
-# Force rules_python at a bleeding edge version (for pip3_import).
+# Force rules_python at a bleeding edge version (for setuptools >44).
+rules_python_version = "929d5a13d4eb1b930086d9353fc6f2d6ad306e43"
+
 http_archive(
     name = "rules_python",
-    url = "https://github.com/bazelbuild/rules_python/releases/download/0.0.3/rules_python-0.0.3.tar.gz",
-    sha256 = "e46612e9bb0dae8745de6a0643be69e8665a03f63163ac6610c210e80d14c3e4",
+    strip_prefix = "rules_python-{}".format(rules_python_version),
+    url = "https://github.com/bazelbuild/rules_python/archive/{}.zip".format(rules_python_version),
+    sha256 = "b590e4fc07ec842b8cc8a39a4ca0336f44d7d5f96753229d240884cd016dc1e3",
 )
 
 # Download Go/Gazelle rules
@@ -46,27 +49,20 @@
 
 # Python rules
 # Important: rules_python must be loaded before protobuf (and grpc) because they load an older version otherwise
-load("@rules_python//python:repositories.bzl", "py_repositories")
+load("@rules_python//python:pip.bzl", "pip_parse")
 
-py_repositories()
-
-load("@rules_python//python:pip.bzl", "pip_repositories")
-
-pip_repositories()
-
-load("@rules_python//python:pip.bzl", "pip3_import")
-
-pip3_import(
+pip_parse(
     name = "pydeps",
-    requirements = "//third_party/py:requirements.txt",
+    requirements_lock = "//third_party/py:requirements.txt",
 )
 
-load("@pydeps//:requirements.bzl", "pip_install")
+load("@pydeps//:requirements.bzl", "install_deps")
 
-pip_install()
+install_deps()
 
 # Setup Go toolchain.
 load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains")
+
 go_register_toolchains()
 
 # IMPORTANT: match protobuf version above with the one loaded by grpc
@@ -163,8 +159,9 @@
 
 gerrit_api()
 
-load("//devtools/gerrit/gerrit-oauth-provider:external_plugin_deps.bzl", gerrit_oauth_deps="external_plugin_deps")
-gerrit_oauth_deps(omit_commons_codec=False)
+load("//devtools/gerrit/gerrit-oauth-provider:external_plugin_deps.bzl", gerrit_oauth_deps = "external_plugin_deps")
+
+gerrit_oauth_deps(omit_commons_codec = False)
 
 # Gerrit 3.3.2 built by q3k, backported with fix for 'empty reviewers column' bug.
 # See: https://bugs.chromium.org/p/gerrit/issues/detail?id=13899
@@ -247,7 +244,9 @@
     commit = "17817c9e319073c03513f9d5177b6142b8fd567b",
     shallow_since = "1593642470 +0200",
 )
-load("@com_googlesource_gerrit_plugin_owners//:external_plugin_deps_standalone.bzl", gerrit_owners_deps="external_plugin_deps_standalone")
+
+load("@com_googlesource_gerrit_plugin_owners//:external_plugin_deps_standalone.bzl", gerrit_owners_deps = "external_plugin_deps_standalone")
+
 gerrit_owners_deps()
 
 # Go image repos for Docker
@@ -299,22 +298,8 @@
 )
 
 go_repository(
-    name = "com_github_gorilla_sessions",
-    importpath = "github.com/gorilla/sessions",
-    sum = "h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=",
-    version = "v1.2.1",
-)
-
-go_repository(
-    name = "com_github_boltdb_bolt",
-    importpath = "github.com/boltdb/bolt",
-    sum = "h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=",
-    version = "v1.3.1",
-)
-
-go_repository(
-    name = "com_github_gorilla_securecookie",
-    importpath = "github.com/gorilla/securecookie",
-    sum = "h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=",
-    version = "v1.1.1",
+    name = "com_github_arran4_golang_ical",
+    importpath = "github.com/arran4/golang-ical",
+    sum = "h1:oOgavmDMGCnNtwZwNoXuK3jCcpF3I96Do9/5qPeSCr8=",
+    version = "v0.0.0-20210601225245-48fd351b08e7",
 )
diff --git a/app/matrix/matrix.hackerspace.pl.jsonnet b/app/matrix/matrix.hackerspace.pl.jsonnet
index 9654c53..2e769b3 100644
--- a/app/matrix/matrix.hackerspace.pl.jsonnet
+++ b/app/matrix/matrix.hackerspace.pl.jsonnet
@@ -28,6 +28,24 @@
         },
     },
 
+    // Bump up storage to 200Gi from default 100Gi, use different name.  The
+    // new name corresponds to a manually migrated and sized-up PVC that
+    // contains data from the original waw3-postgres PVC.
+    postgres3+: {
+        volumeClaim+: {
+            metadata+: {
+                name: "waw3-postgres-2",
+            },
+            spec+: {
+                resources+: {
+                    requests+: {
+                        storage: "200Gi",
+                    },
+                },
+            },
+        },
+    },
+
     appservices: {
         "irc-freenode": irc.AppServiceIrc("freenode") {
             cfg+: {
diff --git a/bgpwtf/machines/edge01.waw.bgp.wtf.nix b/bgpwtf/machines/edge01.waw.bgp.wtf.nix
index cf61bd9..ef61472 100644
--- a/bgpwtf/machines/edge01.waw.bgp.wtf.nix
+++ b/bgpwtf/machines/edge01.waw.bgp.wtf.nix
@@ -172,7 +172,7 @@
   '';
   hscloud.routing.originate = {
     # WAW prefixes, exposed into internet BGP table.
-    v4.waw = { table = "internet"; address = "185.236.240.0"; prefixLength = 23; };
+    v4.waw = { table = "internet"; address = "185.236.240.0"; prefixLength = 24; };
     v6.waw = { table = "internet"; address = "2a0d:eb00::"; prefixLength = 32; };
 
     # Default gateway via us, exposed into aggregated table.
@@ -307,6 +307,8 @@
         if net ~ [ 185.236.240.32/28+ ] then accept;
         # k0 metallb pools
         if net ~ [ 185.236.240.48/28+, 185.236.240.112/28+ ] then accept;
+        #  dcsw01.hswaw.net / dcr03sw48.hswaw.net 
+        if net ~ [ 185.236.240.66/31 ] then accept;
         reject;
       '';
     };
@@ -325,6 +327,19 @@
         reject;
       '';
     };
+    "waw_q3k" = {
+      description = "AGGREGATE CUSTOMER q3k";
+      table = "internet";
+      local = "185.236.240.64";
+      asn = 204880;
+      neighbors = [
+        { address = "185.236.240.65"; asn = 208521; }
+      ];
+      filterIn = ''
+        if net ~ [ 185.236.241.0/24 ] then accept;
+        reject;
+      '';
+    };
     # backup LTE link to edge01.fra
     "fra_edge01" = {
       description = "IBGP edge01.fra";
diff --git a/bgpwtf/oob/index.md b/bgpwtf/oob/index.md
new file mode 100644
index 0000000..8e19eb4
--- /dev/null
+++ b/bgpwtf/oob/index.md
@@ -0,0 +1,75 @@
+LTE Backup
+===
+
+We have a network backup via vultr. As it's difficult to convey its architecture via netbox, we document it slightly more here.
+
+Architecture
+===
+
+The Device (oob1.waw)
+---
+
+oob1.waw is a WDR3600 running OpenWRT, present in dcr01. It has a USB modem plugged in for LTE uplink.
+
+See [netbox](https://netbox.bgp.wtf/dcim/devices/10/) for L1 connectivity diagrams.
+
+The Bridge (185.236.240.72/29)
+---
+
+There is an L2 subnet/bridge that spans three boxes: edge01.waw, oob1.waw and edge01.fra. This network is directly available on one of oob1.waw's ports, and edge01.waw connects to it (for information on ports, see netbox).
+
+    185.236.240.73 - address of oob1.waw on bridge, set on br-bgpvpn on OpenWRT
+    185.236.240.74 - address of edge01.waw on bridge, set on physical interface on edge
+    185.236.240.75 - address of edge01.fra on bridge, set on OpenVPN tap
+
+This subnet is announced by edge01.waw to its main upstreams and through a huge prepend on edge01.fra. When edge01.waw's main uplink goes down, traffic gets routed through edge01.fra, as with the rest of the current production subnet (185.236.240.0/24). However, if edge01.waw is dead, the subnet will stop being announced at all. You will then have to manually jump through edge01.fra.bgp.wtf.
+
+185.236.240.73 (oob1.waw) is not publicly routed, as oob1 has its main routing table default gw via LTE. There is a runbook below on how to access this box.
+
+The OpenVPN tunnel
+---
+
+There is an OpenVPN tunnel set between oob1.waw and edge01.fra. edge01.fra is the server, listening on its publicly routed internet address. oob1 is the client. On oob1, the resulting tap interface is part of the br-bgpvpn bridge.
+
+The LTE Uplink
+---
+
+oob1.waw has a USB modem plugged in with a Play SIM card. The modem exposes a RFC1918 network that it NATs itself (or passes over to a CGNAT at Play?), 192.168.8.1/24. oob1 has a DHCP address in this subnet.
+
+The BGP session
+---
+
+This entire setup lets us establish a non-multihop iBGP session between 185.236.240.74 and 185.236.240.75, and that's what we do.
+Access to 10.10.10.0/24
+
+oob1 sits in this network at .2, and is plugged into the management switch in dcr01. All LAN (yellow) ports on oob1 are also part of this subnet.
+
+Access runbook
+---
+
+oob1.waw, if edge01.waw is up:
+
+    $ ssh -J root@edge01.waw.bgp.wtf root@185.236.240.73
+
+oob1.waw, if edge01.fra is up and the LTE tunnel is up (but edge01.waw is dead):
+
+    $ ssh -J root@edge01.fra.bgp.wtf root@185.236.240.73
+
+LTE modem interface
+
+    $ sudo ip a add 192.168.8.1/32 dev lo
+    $ sudo ssh -J root@edge01.waw.bgp.wtf root@185.236.240.73 -L 192.168.8.1:80:192.168.8.1:80
+    $ # now you can visit 192.168.8.1 on your local web browser
+
+Management
+---
+
+oob1 and edge01.fra are currently fully manually managed. q3k has access.
+
+SIM Card Plan
+---
+
+See: [netbox](https://netbox.bgp.wtf/dcim/devices/10/) for phone number and latest validity information.
+
+To manage the SIM plan, you'll have to access the modem web interface as above. From there, you can bootstrap yourself access to Play24. Generally topping up gives you tons of data, so you can do that if seemingly the link is down. Then, you can try logging into Play24 and buying a year-long subscription package for 50PLN.
+
diff --git a/games/factorio/kube/config/map-gen-settings.libsonnet b/games/factorio/kube/config/map-gen-settings.libsonnet
new file mode 100644
index 0000000..18612f3
--- /dev/null
+++ b/games/factorio/kube/config/map-gen-settings.libsonnet
@@ -0,0 +1,81 @@
+{
+    // The following options are defaults taken verbatim from Factorio 1.1.35
+    // headless' map-gen-settings.example.json, with the following changes:
+    //  - field names unquoted where possible
+    //  - pseudo-comments converted to jsonnet comments
+    //  - comments reflowed to fit 80 columns
+
+    // The inverse of 'water scale' in the map generator GUI.
+    terrain_segmentation: 1,
+
+    // The equivalent to 'water coverage' in the map generator GUI. Higher
+    // coverage means more water in larger oceans.
+    // Water level = 10 * log2(this value)
+    water: 1,
+
+    // Width and height of map, in tiles; 0 means infinite
+    width: 0,
+    height: 0,
+
+    // Multiplier for 'biter free zone radius'
+    starting_area: 1,
+
+    peaceful_mode: false,
+    autoplace_controls: {
+      "coal": {"frequency": 1, "size": 1, "richness": 1},
+      "stone": {"frequency": 1, "size": 1, "richness": 1},
+      "copper-ore": {"frequency": 1, "size": 1,"richness": 1},
+      "iron-ore": {"frequency": 1, "size": 1, "richness": 1},
+      "uranium-ore": {"frequency": 1, "size": 1, "richness": 1},
+      "crude-oil": {"frequency": 1, "size": 1, "richness": 1},
+      "trees": {"frequency": 1, "size": 1, "richness": 1},
+      "enemy-base": {"frequency": 1, "size": 1, "richness": 1}
+    },
+
+    cliff_settings:
+    {
+      // Name of the cliff prototype
+      name: "cliff",
+
+      // Elevation of first row of cliffs
+      cliff_elevation_0: 10,
+
+      // Elevation difference between successive rows of cliffs.
+      // This is inversely proportional to 'frequency' in the map generation
+      // GUI. Specifically, when set from the GUI the value is 40 / frequency.
+      cliff_elevation_interval: 40,
+
+      // Called 'cliff continuity' in the map generator GUI. 0 will result in
+      // no cliffs, 10 will make all cliff rows completely solid
+      richness: 1
+    },
+
+    // Overrides for property value generators (map type)
+    // Leave 'elevation' blank to get 'normal' terrain.
+    // Use 'elevation': '0_16-elevation' to reproduce terrain from 0.16.
+    // Use 'elevation': '0_17-island' to get an island.
+    // Moisture and terrain type are also controlled via this.
+    property_expression_names:
+    {
+      // 'control-setting:moisture:frequency:multiplier' is the inverse of the
+      // 'moisture scale' in the map generator GUI.
+      "control-setting:moisture:frequency:multiplier": "1",
+      // 'control-setting:moisture:bias' is the 'moisture bias' in the map
+      // generator GUI.
+      "control-setting:moisture:bias": "0",
+      // 'control-setting:aux:frequency:multiplier' is the inverse of the
+      // 'terrain type scale' in the map generator GUI.
+      "control-setting:aux:frequency:multiplier": "1",
+      // 'control-setting:aux:bias' is the 'terrain type bias' in the map
+      // generator GUI
+      "control-setting:aux:bias": "0"
+    },
+
+    starting_points:
+    [
+      { x: 0, y: 0}
+    ],
+
+    // Use null for a random seed, number for a specific seed.
+    seed: null
+}
diff --git a/games/factorio/kube/config/map-settings.libsonnet b/games/factorio/kube/config/map-settings.libsonnet
new file mode 100644
index 0000000..458a348
--- /dev/null
+++ b/games/factorio/kube/config/map-settings.libsonnet
@@ -0,0 +1,126 @@
+{
+    // The following options are defaults taken verbatim from Factorio 1.1.35
+    // headless' map-settings.example.json, with the following changes:
+    //  - field names unquoted where possible
+    //  - pseudo-comments converted to jsonnet comments
+    //  - comments reflowed to fit 80 columns
+
+    difficulty_settings:
+    {
+      recipe_difficulty: 0,
+      technology_difficulty: 0,
+      technology_price_multiplier: 1,
+      research_queue_setting: "after-victory"
+    },
+    pollution:
+    {
+      enabled: true,
+      // these are values for 60 ticks (1 simulated second)
+      // amount that is diffused to neighboring chunk
+      diffusion_ratio: 0.02,
+      min_to_diffuse: 15,
+      ageing: 1,
+      expected_max_per_chunk: 150,
+      min_to_show_per_chunk: 50,
+      min_pollution_to_damage_trees: 60,
+      pollution_with_max_forest_damage: 150,
+      pollution_per_tree_damage: 50,
+      pollution_restored_per_tree_damage: 10,
+      max_pollution_to_restore_trees: 20,
+      enemy_attack_pollution_consumption_modifier: 1
+    },
+    enemy_evolution:
+    {
+      enabled: true,
+      time_factor: 0.000004,
+      destroy_factor: 0.002,
+      pollution_factor: 0.0000009
+    },
+    enemy_expansion:
+    {
+      enabled: true,
+      min_base_spacing: 3,
+      max_expansion_distance: 7,
+      friendly_base_influence_radius: 2,
+      enemy_building_influence_radius: 2,
+      building_coefficient: 0.1,
+      other_base_coefficient: 2.0,
+      neighbouring_chunk_coefficient: 0.5,
+      neighbouring_base_chunk_coefficient: 0.4,
+      max_colliding_tiles_coefficient: 0.9,
+      settler_group_min_size: 5,
+      settler_group_max_size: 20,
+      min_expansion_cooldown: 14400,
+      max_expansion_cooldown: 216000
+    },
+    unit_group:
+    {
+      min_group_gathering_time: 3600,
+      max_group_gathering_time: 36000,
+      max_wait_time_for_late_members: 7200,
+      max_group_radius: 30.0,
+      min_group_radius: 5.0,
+      max_member_speedup_when_behind: 1.4,
+      max_member_slowdown_when_ahead: 0.6,
+      max_group_slowdown_factor: 0.3,
+      max_group_member_fallback_factor: 3,
+      member_disown_distance: 10,
+      tick_tolerance_when_member_arrives: 60,
+      max_gathering_unit_groups: 30,
+      max_unit_group_size: 200
+    },
+    steering:
+    {
+      default:
+      {
+        radius: 1.2,
+        separation_force: 0.005,
+        separation_factor: 1.2,
+        force_unit_fuzzy_goto_behavior: false
+      },
+      moving:
+      {
+        radius: 3,
+        separation_force: 0.01,
+        separation_factor: 3,
+        force_unit_fuzzy_goto_behavior: false
+      }
+    },
+    path_finder:
+    {
+      fwd2bwd_ratio: 5,
+      goal_pressure_ratio: 2,
+      max_steps_worked_per_tick: 100,
+      max_work_done_per_tick: 8000,
+      use_path_cache: true,
+      short_cache_size: 5,
+      long_cache_size: 25,
+      short_cache_min_cacheable_distance: 10,
+      short_cache_min_algo_steps_to_cache: 50,
+      long_cache_min_cacheable_distance: 30,
+      cache_max_connect_to_cache_steps_multiplier: 100,
+      cache_accept_path_start_distance_ratio: 0.2,
+      cache_accept_path_end_distance_ratio: 0.15,
+      negative_cache_accept_path_start_distance_ratio: 0.3,
+      negative_cache_accept_path_end_distance_ratio: 0.3,
+      cache_path_start_distance_rating_multiplier: 10,
+      cache_path_end_distance_rating_multiplier: 20,
+      stale_enemy_with_same_destination_collision_penalty: 30,
+      ignore_moving_enemy_collision_distance: 5,
+      enemy_with_different_destination_collision_penalty: 30,
+      general_entity_collision_penalty: 10,
+      general_entity_subsequent_collision_penalty: 3,
+      extended_collision_penalty: 3,
+      max_clients_to_accept_any_new_request: 10,
+      max_clients_to_accept_short_new_request: 100,
+      direct_distance_to_consider_short_request: 100,
+      short_request_max_steps: 1000,
+      short_request_ratio: 0.5,
+      min_steps_to_check_path_find_termination: 2000,
+      start_to_goal_cost_multiplier_to_terminate_path_find: 500.0,
+      overload_levels: [0, 100, 500],
+      overload_multipliers: [2, 3, 4],
+      negative_path_cache_delay_interval: 20
+    },
+    max_failed_behavior_count: 3
+}
diff --git a/games/factorio/kube/config/server-settings.libsonnet b/games/factorio/kube/config/server-settings.libsonnet
new file mode 100644
index 0000000..876ec85
--- /dev/null
+++ b/games/factorio/kube/config/server-settings.libsonnet
@@ -0,0 +1,91 @@
+{
+    // The following options are defaults taken verbatim from Factorio 1.1.35
+    // headless' server-settings.example.json, with the following changes:
+    //  - field names unquoted where possible
+    //  - pseudo-comments converted to jsonnet comments
+    //  - comments reflowed to fit 80 columns
+
+    name: "Name of the game as it will appear in the game listing",
+    description: "Description of the game that will appear in the listing",
+    tags: ["game", "tags"],
+
+    // Maximum number of players allowed, admins can join even a full server. 0
+    // means unlimited.
+    max_players: 0,
+    
+    visibility:
+    {
+      // Game will be published on the official Factorio matching server
+      public: true,
+      // Game will be broadcast on LAN
+      lan: true,
+    },
+    
+    // Your factorio.com login credentials. Required for games with visibility
+    // public
+    username: "",
+    password: "",
+    
+    // Authentication token. May be used instead of 'password' above.
+    token: "",
+    
+    game_password: "",
+    
+    // When set to true, the server will only allow clients that have a valid
+    // Factorio.com account
+    require_user_verification: true,
+    
+    // optional, default value is 0. 0 means unlimited.
+    max_upload_in_kilobytes_per_second: 0,
+    
+    // optional, default value is 5. 0 means unlimited.
+    max_upload_slots: 5,
+    
+    // optional one tick is 16ms in default speed, default value is 0.
+    // 0 means no minimum.
+    minimum_latency_in_ticks: 0,
+    
+    // Players that played on this map already can join even when the max
+    // player limit was reached.
+    ignore_player_limit_for_returning_players: false,
+    
+    // possible values are, true, false and admins-only
+    allow_commands: "admins-only",
+    
+    // Autosave interval in minutes
+    autosave_interval: 10,
+    
+    // server autosave slots, it is cycled through when the server autosaves.
+    autosave_slots: 5,
+    
+    // How many minutes until someone is kicked when doing nothing, 0 for
+    // never.
+    afk_autokick_interval: 0,
+    
+    // Whether should the server be paused when no players are present.
+    auto_pause: true,
+    
+    only_admins_can_pause_the_game: true,
+    
+    // Whether autosaves should be saved only on server or also on all
+    // connected clients. Default is true.
+    autosave_only_on_server: true,
+    
+    // Highly experimental feature, enable only at your own risk of losing your
+    // saves. On UNIX systems, server will fork itself to create an autosave.
+    // Autosaving on connected Windows clients will be disabled regardless of
+    // autosave_only_on_server option.
+    non_blocking_saving: false,
+    
+    // Long network messages are split into segments that are sent over
+    // multiple ticks. Their size depends on the number of peers currently
+    // connected. Increasing the segment size will increase upload bandwidth
+    // requirement for the server and download bandwidth requirement for
+    // clients. This setting only affects server outbound messages. Changing
+    // these settings can have a negative impact on connection stability for
+    // some clients.
+    minimum_segment_size: 25,
+    minimum_segment_size_peer_count: 20,
+    maximum_segment_size: 100,
+    maximum_segment_size_peer_count: 10,
+}
diff --git a/games/factorio/kube/factorio.libsonnet b/games/factorio/kube/factorio.libsonnet
index 57d5bf4..7d08ad7 100644
--- a/games/factorio/kube/factorio.libsonnet
+++ b/games/factorio/kube/factorio.libsonnet
@@ -17,6 +17,42 @@
         rconPort: 2137,
         rconPassword: "farts",
 
+        // Configuration options that will get serialized into
+        // --config config.ini
+        factorioConfig: {
+            // There is no documentation for this file, but you can check
+            // config.ini in any installed Factorio data directory for a
+            // sample.
+            //
+            // This uses std.manifestIni, so to create a file containing:
+            //   version=9
+            //   [other]
+            //   verbose-logging=true
+            // You would set the following:
+            // main: {
+            //   version: "9",
+            // },
+            // other: {
+            //   "verbose-logging": "true",
+            // },
+        },
+        // Configuration options that will get serialized into
+        // --server-settings server-settings.json.
+        serverSettings: (import "config/server-settings.libsonnet") {
+            visibility+: {
+              public: false,
+              lan: false,
+            },
+        },
+        // Configuration options that will get serialized into
+        // --map-settings map-settings.json.
+        mapSettings: (import "config/map-settings.libsonnet") {
+        },
+        // Configuration options that will get serialized into
+        // --map-gen-settings map-gen-settings.json.
+        mapGenSettings: (import "config/map-gen-settings.libsonnet") {
+        },
+
         tag: "latest",
         image: "registry.k0.hswaw.net/q3k/factorio:" + cfg.tag,
         resources: {
@@ -78,6 +114,9 @@
                 "mod { name: \"%s\" version: \"%s\" }" % [m.name, m.version],
                 for m in cfg.mods
             ]),
+            "server-settings.json": std.manifestJson(cfg.serverSettings),
+            "map-settings.json": std.manifestJson(cfg.mapSettings),
+            "map-gen-settings.json": std.manifestJson(cfg.mapGenSettings),
         },
     },
 
@@ -115,6 +154,9 @@
                                 "/entrypoint.sh",
                                 "--rcon-port", std.toString(cfg.rconPort),
                                 "--rcon-password", cfg.rconPassword,
+                                "--server-settings", "/factorio/config/server-settings.json",
+                                "--map-settings", "/factorio/config/map-settings.json",
+                                "--map-gen-settings", "/factorio/config/map-gen-settings.json",
                             ],
                             ports_: {
                                 client: { containerPort: 34197 },
@@ -123,6 +165,7 @@
                             volumeMounts_: {
                                 data: { mountPath: "/data" },
                                 mods: { mountPath: "/factorio/mods" },
+                                config: { mountPath: "/factorio/config" },
                             },
                             resources: cfg.resources,
                         },
diff --git a/hswaw/site/calendar/BUILD.bazel b/hswaw/site/calendar/BUILD.bazel
new file mode 100644
index 0000000..297fde3
--- /dev/null
+++ b/hswaw/site/calendar/BUILD.bazel
@@ -0,0 +1,29 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "load.go",
+        "event.go",
+        "time.go",
+    ],
+    importpath = "code.hackerspace.pl/hscloud/hswaw/site/calendar",
+    visibility = ["//visibility:private"],
+    deps = [
+        "@com_github_arran4_golang_ical//:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = [
+        "event_test.go",
+        "load_test.go",
+    ],
+    data = [
+        ":test.ical",
+    ],
+    embed = [":go_default_library"],
+    deps = ["@com_github_google_go_cmp//cmp:go_default_library"],
+)
diff --git a/hswaw/site/calendar/event.go b/hswaw/site/calendar/event.go
new file mode 100644
index 0000000..19a916b
--- /dev/null
+++ b/hswaw/site/calendar/event.go
@@ -0,0 +1,123 @@
+package calendar
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+)
+
+// UpcomingEvent is a calendar event that will happen in the near future, or is
+// currently happening (relative to same arbitrary timestamp of 'now',
+// depending on the way the UpcomingEvent is created).
+//
+// It is a best-effort parse of an ICS/iCal event into some event that can be
+// interpreted as a 'community event', to be displayed publicly on a site.
+type UpcomingEvent struct {
+	// UID is the unique ICS/iCal ID of this event.
+	UID string
+	// Summary is the 'title' of the event, usually a short one-liner.
+	Summary string
+	// Start and End of the events, potentially whole-day dates. See EventTime
+	// for more information.
+	// If Start is WholeDay then so is End, and vice-versa.
+	Start *EventTime
+	// End of the event, exclusive of the time range (ie. if a timestamp it
+	// defines the timestamp at which the next event can start; if it's whole
+	// day it defines the first day on which the event does not take place).
+	End *EventTime
+	// Tentative is whether this event is marked as 'Tentative' in the source
+	// calendar.
+	Tentative bool
+}
+
+// WholeDay returns true if this is a whole-day (or multi-day) event.
+func (u *UpcomingEvent) WholeDay() bool {
+	return u.Start.WholeDay
+}
+
+var (
+	// onceComplainWarsawGone gates throwing a very verbose message about being
+	// unable to localize UpcomingEvents into Warsaw local time by WarsawDate.
+	onceComplainWarsawGone sync.Once
+)
+
+// WarsawDate prints a human-readable timestamp that makes sense within the
+// context of this event taking place in Warsaw, or at least in the same
+// timezone as Warsaw.
+// It will return a time in one of the following formats:
+//
+//   YEAR/MONTH/DAY
+//   (For one-day events)
+//
+//   YEAR/MONTH/DAY - DAY
+//   (For multi-day events within the same month)
+//
+//   YEAR/MONTH/DAY - YEAR/MONTH/DAY
+//   (For multi-day events spanning more than one month)
+//
+//   YEAR/MONTH/DAY HH:MM - HH:MM
+//   (For timestamped events within the same day)
+//
+//   YEAR/MONTH/DAY HH:MM - YEAR/MONTH/DAY HH:MM
+//   (For timestamped events spanning more than one day)
+//
+func (u *UpcomingEvent) WarsawDate() string {
+	YM := "2006/01"
+	D := "02"
+	YMD := "2006/01/02"
+	HM := "15:04"
+	YMDHM := "2006/01/02 15:04"
+
+	if u.WholeDay() {
+		start := u.Start.Time
+		// ICS whole-day dates are [start, end), ie. 'end' is exclusive.
+		end := u.End.Time.AddDate(0, 0, -1)
+		if start == end {
+			// Event is one-day.
+			return start.Format(YMD)
+		}
+		if start.Year() == end.Year() && start.Month() == end.Month() {
+			// Event starts and ends on the same month, print shortened form.
+			return fmt.Sprintf("%s/%s - %s", start.Format(YM), start.Format(D), end.Format(D))
+		}
+		// Event spans multiple months, print full form.
+		return fmt.Sprintf("%s - %s", start.Format(YMD), end.Format(YMD))
+	}
+
+	warsaw, err := time.LoadLocation("Europe/Warsaw")
+	if err != nil {
+		onceComplainWarsawGone.Do(func() {
+			glog.Errorf("Could not load Europe/Warsaw timezone, did the city cease to exist? LoadLoaction: %v", err)
+		})
+		// Even in the face of a cataclysm, degrade gracefully and assume the
+		// users are local to this service's timezone.
+		warsaw = time.Local
+	}
+
+	start := u.Start.Time.In(warsaw)
+	end := u.End.Time.In(warsaw)
+	if start.Year() == end.Year() && start.Month() == end.Month() && start.Day() == end.Day() {
+		// Event starts and ends on same day, print shortened form.
+		return fmt.Sprintf("%s %s - %s", start.Format(YMD), start.Format(HM), end.Format(HM))
+	}
+	// Event spans multiple days, print full form.
+	return fmt.Sprintf("%s - %s", start.Format(YMDHM), end.Format(YMDHM))
+}
+
+func (u *UpcomingEvent) String() string {
+	return fmt.Sprintf("%s (%s)", u.Summary, u.WarsawDate())
+}
+
+func (e *UpcomingEvent) Elapsed(t time.Time) bool {
+	// Event hasn't started yet?
+	if e.Start.Time.After(t) {
+		return false
+	}
+	// Event has started, but hasn't ended?
+	if e.End.Time.After(t) {
+		return false
+	}
+	return true
+}
diff --git a/hswaw/site/calendar/event_test.go b/hswaw/site/calendar/event_test.go
new file mode 100644
index 0000000..1e95306
--- /dev/null
+++ b/hswaw/site/calendar/event_test.go
@@ -0,0 +1,73 @@
+package calendar
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+func TestWarsawDate(t *testing.T) {
+	makeTime := func(s string) EventTime {
+		t.Helper()
+		warsaw, err := time.LoadLocation("Europe/Warsaw")
+		if err != nil {
+			t.Fatalf("could not get Warsaw timezone: %v", err)
+		}
+		ti, err := time.ParseInLocation("2006/01/02 15:04", s, warsaw)
+		if err != nil {
+			t.Fatal("could not parse test time %q: %v", s, err)
+		}
+		return EventTime{
+			Time: ti,
+		}
+	}
+	makeDay := func(s string) EventTime {
+		t.Helper()
+		ti, err := time.Parse("2006/01/02", s)
+		if err != nil {
+			t.Fatal("could not parse test day %q: %v", s, err)
+		}
+		return EventTime{
+			Time:     ti,
+			WholeDay: true,
+		}
+	}
+	for i, te := range []struct {
+		start EventTime
+		end   EventTime
+		want  string
+	}{
+		{
+			makeTime("2021/03/14 13:37"), makeTime("2021/04/20 21:37"),
+			"2021/03/14 13:37 - 2021/04/20 21:37",
+		},
+		{
+			makeTime("2021/04/20 13:37"), makeTime("2021/04/20 21:37"),
+			"2021/04/20 13:37 - 21:37",
+		},
+		{
+			makeDay("2021/06/01"), makeDay("2021/07/01"),
+			"2021/06/01 - 30",
+		},
+		{
+			makeDay("2021/03/14"), makeDay("2021/04/21"),
+			"2021/03/14 - 2021/04/20",
+		},
+		{
+			makeDay("2021/04/20"), makeDay("2021/04/21"),
+			"2021/04/20",
+		},
+	} {
+		te := te
+		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+			ev := UpcomingEvent{
+				Start: &te.start,
+				End:   &te.end,
+			}
+			got := ev.WarsawDate()
+			if got != te.want {
+				t.Fatalf("wanted %q, got %q", te.want, got)
+			}
+		})
+	}
+}
diff --git a/hswaw/site/calendar/load.go b/hswaw/site/calendar/load.go
new file mode 100644
index 0000000..5ea9198
--- /dev/null
+++ b/hswaw/site/calendar/load.go
@@ -0,0 +1,120 @@
+package calendar
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"sort"
+	"time"
+	_ "time/tzdata"
+
+	ics "github.com/arran4/golang-ical"
+	"github.com/golang/glog"
+)
+
+const (
+	// eventsURL is the calendar from which we load public Hackerspace events.
+	eventsURL = "https://owncloud.hackerspace.pl/remote.php/dav/public-calendars/g8toktZrA9fyAHNi/?export"
+)
+
+// eventBySooner sorts upcoming events so the one that happens the soonest
+// will be first in the list, breaking ties by end time and then by UID.
+type eventBySooner []*UpcomingEvent
+
+func (e eventBySooner) Len() int      { return len(e) }
+func (e eventBySooner) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e eventBySooner) Less(i, j int) bool {
+	a, b := e[i], e[j]
+	if a.Start.Time.Equal(b.Start.Time) { // Equal, not ==: ignore monotonic reading/Location
+		if a.End.Time.Equal(b.End.Time) {
+			return a.UID < b.UID
+		}
+		return a.End.Time.Before(b.End.Time)
+	}
+	return a.Start.Time.Before(b.Start.Time)
+}
+
+// parseUpcomingEvents generates a list of upcoming events from an open ICS/iCal file.
+func parseUpcomingEvents(now time.Time, data io.Reader) ([]*UpcomingEvent, error) {
+	cal, err := ics.ParseCalendar(data)
+	if err != nil {
+		return nil, fmt.Errorf("ParseCalendar: %w", err)
+	}
+
+	var out []*UpcomingEvent
+	for _, event := range cal.Events() {
+		uidProp := event.GetProperty(ics.ComponentPropertyUniqueId)
+		if uidProp == nil || uidProp.Value == "" {
+			glog.Errorf("Event with no UID, ignoring: %+v", event)
+			continue
+		}
+		uid := uidProp.Value
+
+		summaryProp := event.GetProperty(ics.ComponentPropertySummary)
+		if summaryProp == nil || summaryProp.Value == "" {
+			glog.Errorf("Event %s has no summary, ignoring", uid)
+			continue // summaryProp may be nil here; dereferencing it below would panic
+		}
+		summary := summaryProp.Value
+		status := event.GetProperty(ics.ComponentPropertyStatus)
+		tentative := false
+		if status != nil {
+			if status.Value == string(ics.ObjectStatusCancelled) {
+				// NextCloud only has CONFIRMED, CANCELLED and TENTATIVE for
+				// events. We drop everything CANCELLED and keep things that are
+				// TENTATIVE.
+				continue
+			}
+			if status.Value == string(ics.ObjectStatusTentative) {
+				tentative = true
+			}
+		}
+
+		start, err := parseICSTime(event.GetProperty(ics.ComponentPropertyDtStart))
+		if err != nil {
+			glog.Errorf("Event %s has unparseable DTSTART, ignoring: %v", uid, err)
+			continue
+		}
+		end, err := parseICSTime(event.GetProperty(ics.ComponentPropertyDtEnd))
+		if err != nil {
+			glog.Errorf("Event %s has unparseable DTEND, ignoring: %v", uid, err)
+			continue
+		}
+
+		if (start.WholeDay && !end.WholeDay) || (!start.WholeDay && end.WholeDay) {
+			glog.Errorf("Event %s has whole-day inconsistencies, start: %s, end: %s, ignoring", uid, start, end)
+			continue // actually ignore the event, as the message promises
+		}
+		u := &UpcomingEvent{
+			UID:       uid,
+			Summary:   summary,
+			Start:     start,
+			End:       end,
+			Tentative: tentative,
+		}
+		if u.Elapsed(now) {
+			continue
+		}
+
+		out = append(out, u)
+	}
+	sort.Sort(eventBySooner(out))
+	return out, nil
+}
+
+// GetUpcomingEvents returns all public Hackerspace events that are upcoming
+// relative to the given time 'now' as per the Warsaw Hackerspace public
+// calendar (from owncloud.hackerspace.pl).
+func GetUpcomingEvents(ctx context.Context, now time.Time) ([]*UpcomingEvent, error) {
+	r, err := http.NewRequestWithContext(ctx, "GET", eventsURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("NewRequest(%q): %w", eventsURL, err)
+	}
+	res, err := http.DefaultClient.Do(r)
+	if err != nil {
+		return nil, fmt.Errorf("Do(%q): %w", eventsURL, err)
+	}
+	defer res.Body.Close() // NOTE(review): StatusCode is unchecked; a non-200 body will surface as an ICS parse error — confirm intended
+	return parseUpcomingEvents(now, res.Body)
+}
diff --git a/hswaw/site/calendar/load_test.go b/hswaw/site/calendar/load_test.go
new file mode 100644
index 0000000..a07f134
--- /dev/null
+++ b/hswaw/site/calendar/load_test.go
@@ -0,0 +1,51 @@
+package calendar
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestUpcomingEvents(t *testing.T) {
+	r, err := os.Open("test.ical")
+	if err != nil {
+		t.Fatalf("Could not open test ical: %v", err)
+	}
+	ti := time.Unix(1626011785, 0)
+
+	events, err := parseUpcomingEvents(ti, r)
+	if err != nil {
+		t.Fatalf("getUpcomingEvents: %v", err)
+	}
+
+	want := []*UpcomingEvent{
+		{
+			UID:     "65cd51ba-2fd7-475e-a274-61d19c186b66",
+			Summary: "test event please ignore",
+			Start: &EventTime{
+				Time: time.Unix(1626091200, 0),
+			},
+			End: &EventTime{
+				Time: time.Unix(1626093000, 0),
+			},
+		},
+		{
+			UID:     "2f874784-1e09-4cdc-8ae6-185c9ee36be0",
+			Summary: "many days",
+			Start: &EventTime{
+				Time:     time.Unix(1626134400, 0),
+				WholeDay: true,
+			},
+			End: &EventTime{
+				Time:     time.Unix(1626393600, 0),
+				WholeDay: true,
+			},
+		},
+	}
+
+	if diff := cmp.Diff(events, want); diff != "" {
+		t.Errorf("%s", diff)
+	}
+}
diff --git a/hswaw/site/calendar/test.ical b/hswaw/site/calendar/test.ical
new file mode 100644
index 0000000..1d5908d
--- /dev/null
+++ b/hswaw/site/calendar/test.ical
@@ -0,0 +1,49 @@
+BEGIN:VCALENDAR

+VERSION:2.0

+CALSCALE:GREGORIAN

+PRODID:-//SabreDAV//SabreDAV//EN

+X-WR-CALNAME:q3k test calendar (cc161907-84ed-42b3-b65f-8bdc79161ffe)

+X-APPLE-CALENDAR-COLOR:#1E78C1

+REFRESH-INTERVAL;VALUE=DURATION:PT4H

+X-PUBLISHED-TTL:PT4H

+BEGIN:VTIMEZONE

+TZID:Europe/Berlin

+BEGIN:DAYLIGHT

+TZOFFSETFROM:+0100

+TZOFFSETTO:+0200

+TZNAME:CEST

+DTSTART:19700329T020000

+RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU

+END:DAYLIGHT

+BEGIN:STANDARD

+TZOFFSETFROM:+0200

+TZOFFSETTO:+0100

+TZNAME:CET

+DTSTART:19701025T030000

+RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU

+END:STANDARD

+END:VTIMEZONE

+BEGIN:VEVENT

+CREATED:20210711T134245Z

+DTSTAMP:20210711T134342Z

+LAST-MODIFIED:20210711T134342Z

+SEQUENCE:3

+UID:2f874784-1e09-4cdc-8ae6-185c9ee36be0

+DTSTART;VALUE=DATE:20210713

+DTEND;VALUE=DATE:20210716

+SUMMARY:many days

+DESCRIPTION:I am a multiline\n\ndescription\n\nwith a link: https://example

+ .com/foo\n\nbarfoo

+END:VEVENT

+BEGIN:VEVENT

+CREATED:20210711T134220Z

+DTSTAMP:20210711T134323Z

+LAST-MODIFIED:20210711T134323Z

+SEQUENCE:3

+UID:65cd51ba-2fd7-475e-a274-61d19c186b66

+DTSTART;TZID=Europe/Berlin:20210712T140000

+DTEND;TZID=Europe/Berlin:20210712T143000

+SUMMARY:test event please ignore

+DESCRIPTION:I am a description

+END:VEVENT

+END:VCALENDAR

diff --git a/hswaw/site/calendar/time.go b/hswaw/site/calendar/time.go
new file mode 100644
index 0000000..f742a67
--- /dev/null
+++ b/hswaw/site/calendar/time.go
@@ -0,0 +1,73 @@
+package calendar
+
+import (
+	"fmt"
+	"time"
+
+	ics "github.com/arran4/golang-ical"
+)
+
+// EventTime is a timestamp for calendar events. It either represents a real
+// point in time or a calendar day, if it's a whole-day event.
+type EventTime struct {
+	// Time is a timestamp in the timezone originally defined for this event if
+	// WholeDay is false. Otherwise, it's a UTC time from which a year, month
+	// and day can be extracted and treated as the indication of a 'calendar
+	// day' in an unknown timezone.
+	Time time.Time
+	// WholeDay is true if this EventTime represents an entire calendar day.
+	WholeDay bool
+}
+
+// String renders whole-day events as a date and timed events as a full timestamp.
+func (e *EventTime) String() string {
+	if e.WholeDay {
+		return fmt.Sprintf("%s (whole day)", e.Time.Format("2006/01/02"))
+	}
+	return e.Time.String()
+}
+
+// parseICSTime attempts to parse a given ICS DT{START,END} object into an
+// EventTime, trying to figure out if the given object represents a timestamp
+// or a whole-day event.
+func parseICSTime(p *ics.IANAProperty) (*EventTime, error) {
+	// If this has a VALUE of DATE, then this is a whole-day time.
+	// Otherwise, it's an actual timestamp.
+	valueList, ok := p.ICalParameters[string(ics.ParameterValue)]
+	if ok {
+		if len(valueList) != 1 || valueList[0] != "DATE" {
+			return nil, fmt.Errorf("unsupported time type: %v", valueList)
+		}
+		ts, err := time.Parse("20060102", p.Value)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse date %q: %w", p.Value, err)
+		}
+		return &EventTime{
+			Time:     ts,
+			WholeDay: true,
+		}, nil
+	}
+	// You would expect that nextcloud would emit VALUE == DATE-TIME for
+	// timestamps, but that just doesn't seem to be the case. Maybe I should
+	// read the ICS standard...
+
+	tzidList, ok := p.ICalParameters[string(ics.ParameterTzid)]
+	if !ok || len(tzidList) != 1 {
+		return nil, fmt.Errorf("TZID missing")
+	}
+	tzid := tzidList[0]
+	location, err := time.LoadLocation(tzid)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse TZID %q: %w", tzid, err)
+	}
+
+	ts, err := time.ParseInLocation("20060102T150405", p.Value, location)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse time %q: %w", p.Value, err)
+	}
+
+	return &EventTime{
+		Time:     ts,
+		WholeDay: false,
+	}, nil
+}
diff --git a/personal/q3k/b/32/BUILD.bazel b/personal/q3k/b/32/BUILD.bazel
new file mode 100644
index 0000000..ae285a1
--- /dev/null
+++ b/personal/q3k/b/32/BUILD.bazel
@@ -0,0 +1,10 @@
+load("@rules_python//python:defs.bzl", "py_binary")
+load("@pydeps//:requirements.bzl", "requirement")
+
+py_binary(
+    name = "cleanup",
+    srcs = ["cleanup.py"],
+    deps = [
+        requirement("psycopg2"),
+    ],
+)
diff --git a/personal/q3k/b/32/cleanup.py b/personal/q3k/b/32/cleanup.py
new file mode 100644
index 0000000..3ded775
--- /dev/null
+++ b/personal/q3k/b/32/cleanup.py
@@ -0,0 +1,150 @@
+# Script to attempt to clean up our owncloud database (b/32) after The Postgres
+# Fuckup (b/30).
+#
+# Think of it as a one-shot fsck, documented in the form of the code that q3k@
+# used to recover from this kerfuffle.
+#
+# SECURITY: It's full of manual SQL query crafting without parametrization.
+# Don't attempt to use it for anything else other than this one-shot usecase.
+#
+# You will need to tunnel to the postgreses running on Boston:
+#    $ ssh \
+#        -L15432:127.0.0.1:5432 \
+#        -L15433:127.0.0.1:5433 \
+#        hackerspace.pl
+
+from datetime import datetime
+import os
+
+import psycopg2
+
+
+incident_start = 1611529200 # when pg12 started to run
+incident_end = 1611788400 # when we rolled back to pg9
+
+
+OWNCLOUD_PASSWORD = os.environ.get("OWNCLOUD_PASSWORD").strip()
+if not OWNCLOUD_PASSWORD:
+    # Get it from boston, /var/www/owncloud/config/config.php.
+    raise Exception("OWNCLOUD_PASSWORD must be set to owncloud postgres password")
+
+
+conn9 = psycopg2.connect(host="localhost", port=15432, user="owncloud", password=OWNCLOUD_PASSWORD, dbname="owncloud")
+conn12 = psycopg2.connect(host="localhost", port=15433, user="owncloud", password=OWNCLOUD_PASSWORD, dbname="owncloud")
+
+
+def idset(conn, table, keyname="id"):
+    """Return a set of IDs from a given table, one per row."""
+    cur = conn.cursor()
+    cur.execute(f"SELECT {keyname} FROM oc_{table}")
+    res = cur.fetchall()
+    cur.close()
+    return set([r[0] for r in res])
+
+
+def valset(conn, table, keys):
+    """Return a set of concatenated values for the given keys in a table, one per row."""
+    keynames = ", ".join(keys)
+    cur = conn.cursor()
+    cur.execute(f"SELECT {keynames} FROM oc_{table}")
+    res = cur.fetchall()
+    cur.close()
+    res = [';;;'.join([str(elem) for elem in r]) for r in res]
+    return set(res)
+
+
+# Check accounts difference.
+#
+# RESULT: Thankfully, no accounts have been accidentally rolled back.
+accounts12 = idset(conn12, "accounts", keyname="uid")
+accounts9 = idset(conn9, "accounts", keyname="uid")
+print("Accounts missing in 9:", accounts12 - accounts9)
+assert (accounts12 - accounts9) == set()
+
+
+def account_by_uid(conn, uid):
+    """Return SSO UID for a given Owncloud UID."""
+    cur = conn.cursor()
+    cur.execute(f"SELECT ldap_dn FROM oc_ldap_user_mapping WHERE owncloud_name = '{uid}'")
+    dn, = cur.fetchone()
+    cur.close()
+    part = dn.split(',')[0]
+    assert part.startswith('uid=')
+    return part[4:]
+
+
+def storage_owner_by_id(conn, id_):
+    """Return SSO UID for a given storage numerical ID."""
+    cur = conn.cursor()
+    cur.execute(f"SELECT id FROM oc_storages WHERE numeric_id = '{id_}'")
+    oid, = cur.fetchone()
+    cur.close()
+    if oid == 'object::store:amazon::nextcloud':
+        return "S3"
+    assert oid.startswith('object::user:')
+    userid = oid[13:]
+    assert len(userid) > 0
+    if userid == "gallery":
+        return "GALLERY"
+    return account_by_uid(conn, userid)
+
+
+# Check shares table. This table contains the intent of sharing some file with someone else.
+#
+# RESULT: we only have things that have been removed after rollback to PG9,
+# nothing was created in PG12 and lost.
+shareids12 = idset(conn12, "share")
+shareids9 = idset(conn9, "share")
+print("Shares missing in 9:", len(shareids12 - shareids9))
+cur12 = conn12.cursor()
+for id_ in list(shareids12-shareids9):
+    cur12.execute(f"SELECT uid_owner, file_target, stime, share_with FROM oc_share WHERE id = {id_}")
+    uid_owner, file_target, stime, share_with = cur12.fetchone()
+    account = account_by_uid(conn12, uid_owner)
+    stime_human = datetime.utcfromtimestamp(stime).strftime('%Y-%m-%d %H:%M:%S')
+    print(f"Missing share {id_} {file_target} owned by {account}..")
+    if stime < incident_start or stime > incident_end:
+        print(f"  Skipping, created at {stime_human}")
+        continue
+    raise Exception("Unhandled.")  # a share created during the incident window needs manual triage
+cur12.close()
+
+
+# Check mounts table. This contains root file storages for each user, but also
+# incoming shares 'mounted' into a user's account.
+# From what I can tell, storage_id/root_id are the source path that's being
+# mounted (root_id being the fileid inside an oc_filecache, and storage_id
+# being the storage in which that file is kept), while user_id/mount_point are
+# the mount destination (ie. path into which this is mounted for a user's
+# view).
+#
+# RESULT: we only have share-mounts missing for a handful of users. We choose
+# to ignore it, as we assume next time these users log in they will get the
+# mounts again.
+# TODO(q3k): verify this
+mounts12 = valset(conn12, "mounts", ["storage_id", "root_id", "user_id", "mount_point"])
+mounts9 = valset(conn9, "mounts", ["storage_id", "root_id", "user_id", "mount_point"])
+print("Mounts missing in 9:", len(mounts12 - mounts9))
+# Mounts that appear normally whenever you log into owncloud, as they are the result of shares:
+mount_names_ok = set(["2020-03-26_covid_templar", "camera", "Public Shaming", "przylbice.md", "Test.txt", "covid"])
+# Mounts that used to be from a share that existed, but has been since deleted in PG9.
+mount_names_ok |= set(["Covid-instrukcje", "Chaos_modele_covid", "Covid_proces_presspack"])
+mounts_sorted = []
+for m in list(mounts12 - mounts9):
+    storage_id, root_id, user_id, mount_point = m.split(';;;')
+    mounts_sorted.append((storage_id, root_id, user_id, mount_point))
+mounts_sorted = sorted(mounts_sorted, key=lambda el: el[2])
+for storage_id, root_id, user_id, mount_point in mounts_sorted:
+    assert mount_point.startswith("/" + user_id + "/")
+    mount_point = mount_point[len(user_id)+1:]
+    account = account_by_uid(conn12, user_id)
+    print(f"Missing mount {mount_point}, storage ID {storage_id}, owned by {account}..")
+    storage_owner = storage_owner_by_id(conn12, storage_id)
+    print(f"  Storage owner: {storage_owner}")
+
+    parts = mount_point.split('/')
+    if len(parts) == 4 and parts[0] == '' and parts[1] == 'files' and parts[2] in mount_names_ok and parts[3] == '':
+        print("  Skipping, known okay")
+        continue
+    raise Exception("Unhandled")  # an unexpected missing mount needs manual triage
+
diff --git a/personal/q3k/b/32/secrets/cipher/log.txt b/personal/q3k/b/32/secrets/cipher/log.txt
new file mode 100644
index 0000000..1546b07
--- /dev/null
+++ b/personal/q3k/b/32/secrets/cipher/log.txt
@@ -0,0 +1,58 @@
+-----BEGIN PGP MESSAGE-----
+
+hQEMAzhuiT4RC8VbAQf9Ecn9tDsi84AKCyPySLpGFBj5Fp8Rc/9b3RA5f4614tqE
+2LKn5UQP7Ejg6LzCZEBlplu4CFBM5fVe+sx0pZNTVdi+qOPFYB4ruV0TLLjacaAY
+6hyD7mzmMEWVhHYHJpqCwUV8Vx7vN/6SG91nObCuEjbfYrAXjwdiXvDiBumH6d9O
+N5CTh5EYVqazZD4SvSXYPyG/iv6/1nrxlfYA/LxD+ULVPhjKBboNTjfVtjAhQVvD
+ZRMl4/rP1/WXRXz7svGaENTEG6TQ95ZrnSYPZQD+amWKumRiCdneN6I3N4s2F9dJ
++FP5WJ214rUAx/rusf+Gt5v2FPyfNw6QJR41WFrjhIUBDANcG2tp6fXqvgEH/0pJ
+Hlwe6F1ltWm5gdhUUJ5+/tJQH3e4cq0EI+26dENE9633pVA8ERFY3zmcTcxOZ04k
+K9r8+ifiagdIvWldNLKXrHaNxZWM+r4fJ1RTo0x/gskid8e9otdxo9kR8t3yh9SZ
+DHRSlCKr2+/H167h89dPWtJh5meUPWAlw2Zmcl2GDue5zjDVrHEkZ8fbxOwf9E/o
+J93oaux9Ijfrp8tP7lO90qAoaXvTAMeI7hnkWHnX2akcVh03U4FgDqLpZSlVCRP/
+mIFXRnaLuZEFdmrO+raZUi58t8xaadTZy9hsmCtlsgSKIwgA8zsF2CmJCRvZD95w
+Q70vLg91E7Bfyr1duhKFAgwDodoT8VqRl4UBEACoBXEXQaxVvZ3vClxUkxrq468/
+YC6NyTXWOt9KvpuGUWCtbFIQQ4CSfRu8UpvPasppY5shLE4L+0f79OV8aQfV4mF+
+EdHZ0zCWuVIBcYh+iiAsz5zYpbB7vSW+J+DG7ZH0pCVJZ8CSTJhK42BmyDh0a6ut
+glHqLQaeC6dPur5To+C+Ozl8v6GduJDzZQ5ERCaML0nhauH0yoEe3My52BlEZ9MK
+mjJlm3Q3VCYTfT8M08ZLTART6zMotYTemE/u+U8rVVPPDY+7kgy8yt44QAoUzE49
+shr5llK9GBjDeo4URf//0KvLs95H4TdGVYMfELcFyqyCs0YJ2vee2KvoyuXPGmkO
+6kuASSgGn5zFqdX13P0y4QIC5OxWnX22CxMdXpvG18RqCW2qyMT3wscqTCYuIHyc
+yPZWfi+MM1tDB+CdQbFLus5gWPUSZ8kK+sur/5lb0ToamWvYHhNTKMOBh8MCODAy
+F6lxrUIc81uojF8VKbgK+lOyQUtL4R3+1Wbk+9TyBYUJNjMi/S2mpL1bOUkKZkpb
+hmqloSPP7W8xa9JuxFDgyPLWpmGSJkDz+6MPiAPfPBgmczUORNuGReck4SDYlZib
+SRWrhs54zkmlZPmFCEoqbLqyFfQiNqLhQ8J/ya//HZuXAUNd9612P2a+idAAE6gx
+pz54H+jeH9o8Db01BYUCDAPiA8lOXOuz7wEP/1Uw8k0t4kPrkD1hZ0p2KFklyM6O
+Ri3j1Y2IcYcQZ23OKMx5qo90/aLBzYSRtA/NHWuqcaDjudyFJ12lSTNyQX0sFnUP
+nbig6smYzOu3XnkTnRBrOe4YN5YJiyUFTsK5wPcUcArCuLASvRCxzkwHyTQTnW6Y
+r92oArKEzXzE8sYFMPpRYpV29sQgqXUBEq0bA9codN1Z1m5N3aGvMiYyimq4jXoq
+Va+Tsry5KON2S0/h8UZsLnY/USSXjWdhb266tU9MLgY2EIK9DaTfU6mxsMTLysVQ
+RhBmtHQhzczkfueMYa7KigbXJNxvEjUlR22RVRiH9F3lhhismsW1xtgfI/IlGmQx
+6uhCFMDIsgZh39kWRP4vUxzWTvnPSD76omBcdjVTKGDEd8vqEwIBeORg9E6NoN+Q
+8HR6Fb6y0pAk30VO1mP3xC0Li9q13ips1p1w+Xu9WOPFVEwJaSFn1oaEWTuQttn3
+gPdng35LjYLnch588exe1bhoj6WiUaCclZF5yjewMCGowlvCO05QEyivRj2YGeHS
+D+oH+dv/Ex0PIJR/8P+clAcB/u3qQl2W40pPjHOmMGb8Gi+GSVU5HX0lENdmMrfw
+QfZuqKh08j8gVK77yX0UjtNhN0XVu5toCncgSFLxZSkQd4opZTonmdji0vwSapx2
+FbPI2PVqRRWjknuj0ukBu3zMhA+90PvWjmWxaSVBs0FDOvOApYUf7QwYuVpk/hkN
+GFqrarltId1Xy2UQSLHdKgb1fj4OhnVSApK9cKxD0mjJ87QibQFLeR2KCIOX4/EJ
+qAuC7xV8VTtzFQqOYF1RrXN8NbU/htMAecKYX3mj3S9FXgOUHEj/fYdFHuFhVhdw
+onJZ1CReuQX6nGU6c7bCKj7mvt/QISq7eiPT2zqt+f0X+Bz6ODkk/5QHyu2mQUTt
+PQOayDWpi5vcNsiMj8DLR7nLjErHau/joIyRMopssYzdDb5d2tsofIjoL9VzYbUu
+PTs/VJCIJ7XvRt1SA0pyQ9JUrvNhskz095CVuPF1LoA8HlcHWCUhiT19bWegsYIu
+rFqglNCrNuN2t0mQpeA9108EB8m5bmaEK6tlepwhqno3S35KnYGT+FZTo8A3xt38
+sOATC71bTPyLSNklXK+7t4YPD5nulINNLDqoxUe7ruCwZRkiWZHbp0+AON/jk5CZ
+O9pNUPA524+sLZzY1XGumifv+f7H4vExOhAMWsPAVfwBdwW4//wIeafV65RMsEUD
+SzMUfdNQVvf6w1YP3MpDvCHOLlCTrMf8fKKQAkT6Dj/nGHt5bVA10IYY/jaIdXj0
+VsONFWWaqKxmh9kVGmHdjvZ/tnDFXRPXS+3ddA/jqfVt2JzebHBDeropYYmDXMEe
+wA4Nbl5qjSoxFsmVMNROIXMrMv66LVG3kVxuHUix147qix8JTOWCFlgaBBvp16Dv
+8k52poapN6QnMnR12QWxZyroZEeTV4RvNCB/QdoVHEcx8/XBmE3psSceUh9GNU6S
+BZsb+68KosvRV9bFDcg0DJ+Qyp7k25F4+ItdSxIceW0phAQft9GJafehvnXaQj9A
+vRLuLTtM37QL8YX7vs5DKJAzG7RCgmrUfbVS6BnWhLAaUWTFgQXHd9gtw2XCkb3y
+GUCztuO3t5zRwxTlvXWt1KBdLvIm4xrmU/yfuUOK2eLmwdH+rqouVUW1fRrTXhZD
+OGyvSLIB/kujiukjIJ4idBImzmJcqMewdEdiYw/zsns3RZrfFop1IBZ8fpdAXY4m
+S0j9Yhy9qqyZ+h9ZYFG5vK6xdevaeqIGGpc7Zk7xTZjXrA+kbrjZyJa6LMUF9dFH
+MJx4tytpu546euLWP2t9OEu7T+0b9sOrQCrNNIS+qDPN37FNzvxOwCf+i5xr00nI
+7ifX1c5tA/K6b/IhDVcACqfnUAcgPg+S2qkRtleATzKE0g2ISozbw2/LNb/Th3L1
+/fH2VmHZyTHJg06PpLsaqnNFpiwIS3Kb2RKtgo3ZNLg+S6QVUd90wEMEX+cdgg==
+=aO5Z
+-----END PGP MESSAGE-----
diff --git a/third_party/go/repositories.bzl b/third_party/go/repositories.bzl
index d4355a6..12edfe1 100644
--- a/third_party/go/repositories.bzl
+++ b/third_party/go/repositories.bzl
@@ -1826,3 +1826,28 @@
         sum = "h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=",
         version = "v1.2.1",
     )
+    go_repository(
+        name = "com_github_gorilla_sessions",
+        importpath = "github.com/gorilla/sessions",
+        sum = "h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=",
+        version = "v1.2.1",
+    )
+    go_repository(
+        name = "com_github_boltdb_bolt",
+        importpath = "github.com/boltdb/bolt",
+        sum = "h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=",
+        version = "v1.3.1",
+    )
+    go_repository(
+        name = "com_github_gorilla_securecookie",
+        importpath = "github.com/gorilla/securecookie",
+        sum = "h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=",
+        version = "v1.1.1",
+    )
+    go_repository(
+        name = "com_github_arran4_golang_ical",
+        importpath = "github.com/arran4/golang-ical",
+        sum = "h1:oOgavmDMGCnNtwZwNoXuK3jCcpF3I96Do9/5qPeSCr8=",
+        version = "v0.0.0-20210601225245-48fd351b08e7",
+    )
+
diff --git a/third_party/py/requirements.txt b/third_party/py/requirements.txt
index 1a50c93..df0da45 100644
--- a/third_party/py/requirements.txt
+++ b/third_party/py/requirements.txt
@@ -15,7 +15,6 @@
 fabric==2.4.0
 Flask==1.1.1
 Flask-Login==0.4.1
-Flask-OAuthlib==0.9.5
 Flask-SQLAlchemy==2.4.0
 Flask-WTF==0.14.2
 future==0.17.1
@@ -27,13 +26,13 @@
 itsdangerous==1.1.0
 Jinja2==2.10.1
 MarkupSafe==1.1.1
-oauthlib==2.1.0
+oauthlib==3.1.1
 paramiko==2.4.2
 psycopg2==2.8.5
 pyasn1==0.4.5
 pycparser==2.19
-PyNaCl==1.3.0
 pyelftools==0.26
+PyNaCl==1.3.0
 python-dateutil==2.8.0
 pytz==2019.1
 requests==2.22.0
@@ -44,3 +43,5 @@
 urllib3==1.25.3
 Werkzeug==0.15.5
 WTForms==2.2.1
+zope.event==4.5.0
+zope.interface==5.4.0
diff --git a/tools/secretstore.py b/tools/secretstore.py
index b0d2cfe..767a0fc 100644
--- a/tools/secretstore.py
+++ b/tools/secretstore.py
@@ -49,6 +49,10 @@
     "0879F9FCA1C836677BB808C870FD60197E195C26", # implr
 ]
 
+# Currently, Patryk's GPG key is expired. This hacks around that by pretending
+# it's January 2021.
+# TODO(q3k/patryk): remove this once Patryk updates his key.
+systime = '20210101T000000'
 
 _logger_name = __name__
 if _logger_name == '__main__':
@@ -61,7 +65,15 @@
 
 
 def encrypt(src, dst):
-    cmd = ['gpg' , '--encrypt', '--armor', '--batch', '--yes', '--output', dst]
+    cmd = [
+        'gpg' , 
+        '--encrypt',
+        '--faked-system-time', systime,
+        '--trust-model', 'always',
+        '--armor',
+        '--batch', '--yes',
+        '--output', dst,
+    ]
     for k in keys:
         cmd.append('--recipient')
         cmd.append(k)
@@ -80,7 +92,7 @@
     Returns the encryption key ID for a given GPG fingerprint (eg. one from the
     'keys' list.
     """
-    cmd = ['gpg', '-k', '--keyid-format', 'long', fp]
+    cmd = ['gpg', '-k', '--faked-system-time', systime, '--keyid-format', 'long', fp]
     res = subprocess.check_output(cmd).decode()
 
     # Sample output: