# This is forked from bird.cfg.template from calico running on k0.hswaw.net on 2020/09/21.
# Changed vs. upstream (C-f HSCLOUD):
#  - set 'passive on' on 127.0.0.1 neighbors, used for establishing connectivity
#    with metallb.
# Generated by confd
include "bird_aggr.cfg";
include "bird_ipam.cfg";
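
# Note: the two included files are also rendered by confd; they are assumed
# (not verified here) to define the calico_kernel_programming and
# calico_export_to_bgp_peers filters referenced by the protocols below.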

{{- $node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}{{$node_ip := getv $node_ip_key}}
{{- $router_id := getenv "CALICO_ROUTER_ID" ""}}

{{- $node_name := getenv "NODENAME"}}

router id {{if eq "hash" ($router_id) -}}
{{hashToIPv4 $node_name}};
{{- else -}}
{{if ne "" ($router_id)}}{{$router_id}}{{else}}{{$node_ip}}{{end}};
{{- end}}
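
# Rendered sketch of the router id block above, for a hypothetical node with
# ip_addr_v4 10.0.2.15 and CALICO_ROUTER_ID unset (the else branch falls
# through to the node IP):
#
#   router id 10.0.2.15;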

{{- define "LOGGING"}}
{{- $node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}
{{- if exists $node_logging_key}}
{{- $logging := getv $node_logging_key}}
{{- if eq $logging "debug"}}
  debug all;
{{- else if ne $logging "none"}}
  debug { states };
{{- end}}
{{- else if exists "/global/loglevel"}}
{{- $logging := getv "/global/loglevel"}}
{{- if eq $logging "debug"}}
  debug all;
{{- else if ne $logging "none"}}
  debug { states };
{{- end}}
{{- else}}
  debug { states };
{{- end}}
{{- end}}
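
# Expansion sketch for the LOGGING template above: a per-node loglevel of
# "debug" emits "debug all;", "none" emits nothing, any other value emits
# "debug { states };", and when neither /host/<NODENAME>/loglevel nor
# /global/loglevel is set it falls back to the default:
#
#   debug { states };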

# Configure synchronization between routing tables and kernel.
protocol kernel {
  learn;             # Learn all alien routes from the kernel
  persist;           # Don't remove routes on bird shutdown
  scan time 2;       # Scan kernel routing table every 2 seconds
  import all;
  export filter calico_kernel_programming; # Default is export none
  graceful restart;  # Turn on graceful restart to reduce potential flaps in
                     # routes when reloading BIRD configuration. With a full
                     # automatic mesh, there is no way to prevent BGP from
                     # flapping since multiple nodes update their BGP
                     # configuration at the same time; GR is not guaranteed to
                     # work correctly in this scenario.
}
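
# Runtime check sketch (assumptions: the calico/node image ships the BIRD
# client as birdcl, and this unnamed protocol gets the default instance name
# kernel1):
#
#   birdcl show protocols all kernel1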

# Watch interface up/down events.
protocol device {
  {{- template "LOGGING"}}
  scan time 2;    # Scan interfaces every 2 seconds
}

protocol direct {
  {{- template "LOGGING"}}
  interface -"cali*", -"kube-ipvs*", "*"; # Exclude cali* and kube-ipvs* but
                                          # include everything else. In
                                          # IPVS-mode, kube-proxy creates a
                                          # kube-ipvs0 interface. We exclude
                                          # kube-ipvs0 because this interface
                                          # gets an address for every in-use
                                          # cluster IP. We use static routes
                                          # for when we legitimately want to
                                          # export cluster IPs.
}
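
# How BIRD evaluates the interface pattern list above (general BIRD behaviour,
# restated here as an assumption rather than something this file guarantees):
# patterns are tried left to right, the first match decides, and a leading "-"
# means exclude. For a hypothetical node:
#
#   cali0123abcdef0 -> matches -"cali*"      -> excluded
#   kube-ipvs0      -> matches -"kube-ipvs*" -> excluded
#   eno1            -> matches "*"           -> included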

{{if eq "" ($node_ip)}}# IPv4 disabled on this node.
{{else}}{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}
# Template for all BGP clients
template bgp bgp_template {
  {{- $as_key := or (and (exists $node_as_key) $node_as_key) "/global/as_num"}}
  {{- $node_as_num := getv $as_key}}
  {{- template "LOGGING"}}
  description "Connection to BGP peer";
  local as {{$node_as_num}};
  multihop;
  gateway recursive; # This should be the default, but just in case.
  import all;        # Import all routes, since we don't know what the upstream
                     # topology is and therefore have to trust the ToR/RR.
  export filter calico_export_to_bgp_peers;  # Only want to export routes for workloads.
  source address {{$node_ip}};  # The local address we use for the TCP connection
  add paths on;
  graceful restart;  # See comment in kernel section about graceful restart.
  connect delay time 2;
  connect retry time 5;
  error wait time 5,30;
}
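
# AS number resolution in the template above: the per-node key
# /host/<NODENAME>/as_num wins when it exists, otherwise /global/as_num is
# used. With only a hypothetical /global/as_num of 65000 set, this renders as:
#
#   local as 65000;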

# ------------- Node-to-node mesh -------------
{{- $node_cid_key := printf "/host/%s/rr_cluster_id" (getenv "NODENAME")}}
{{- $node_cluster_id := getv $node_cid_key}}
{{if (json (getv "/global/node_mesh")).enabled}}
{{range $host := lsdir "/host"}}
{{$onode_as_key := printf "/host/%s/as_num" .}}
{{$onode_ip_key := printf "/host/%s/ip_addr_v4" .}}{{if exists $onode_ip_key}}{{$onode_ip := getv $onode_ip_key}}
{{$nums := split $onode_ip "."}}{{$id := join $nums "_"}}
# For peer {{$onode_ip_key}}
{{if eq $onode_ip ($node_ip) }}# Skipping ourselves ({{$node_ip}})
{{else if ne "" $onode_ip}}protocol bgp Mesh_{{$id}} from bgp_template {
  neighbor {{$onode_ip}} as {{if exists $onode_as_key}}{{getv $onode_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};
  {{- /*
       Make the peering unidirectional. This avoids a race where
       - peer A opens a connection and begins a graceful restart
       - before the restart completes, peer B opens its connection
       - peer A sees the new connection and aborts the graceful restart, causing a route flap.
  */ -}}
  {{if gt $onode_ip $node_ip}}
  passive on; # Mesh is unidirectional, peer will connect to us.
  {{- end}}
}{{end}}{{end}}{{end}}
{{else}}
# Node-to-node mesh disabled
{{end}}
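
# Rendered sketch of one mesh peering, with hypothetical values: local node
# 10.0.2.15 and peer 10.0.2.16, both using /global/as_num 65000. The string
# comparison above makes only one side of each pair passive; since the peer's
# IP sorts higher than ours, this node waits for the peer to connect:
#
#   protocol bgp Mesh_10_0_2_16 from bgp_template {
#     neighbor 10.0.2.16 as 65000;
#     passive on; # Mesh is unidirectional, peer will connect to us.
#   }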


# ------------- Global peers -------------
{{if ls "/global/peer_v4"}}
{{range gets "/global/peer_v4/*"}}{{$data := json .Value}}
{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}
# For peer {{.Key}}
{{- if eq $data.ip ($node_ip) }}
# Skipping ourselves ({{$node_ip}})
{{- else}}
protocol bgp Global_{{$id}} from bgp_template {
  {{if eq $data.ip ("127.0.0.1")}}passive on; # HSCLOUD {{end}}
  neighbor {{$data.ip}} as {{$data.as_num}};
  {{- if and (eq $data.as_num $node_as_num) (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}
  rr client;
  rr cluster id {{$node_cluster_id}};
  {{- end}}
}
{{- end}}
{{end}}
{{else}}# No global peers configured.{{end}}
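
# Data shape sketch for the loop above (hypothetical key and value; the exact
# datastore prefix and field encoding are assumptions): a global peer stored as
#
#   /global/peer_v4/127.0.0.1 -> {"ip": "127.0.0.1", "as_num": "65001"}
#
# renders, thanks to the HSCLOUD change, as a passive session towards the
# local metallb speaker:
#
#   protocol bgp Global_127_0_0_1 from bgp_template {
#     passive on; # HSCLOUD
#     neighbor 127.0.0.1 as 65001;
#   }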


# ------------- Node-specific peers -------------
{{$node_peers_key := printf "/host/%s/peer_v4" (getenv "NODENAME")}}
{{if ls $node_peers_key}}
{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}
{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}
# For peer {{.Key}}
{{- if eq $data.ip ($node_ip) }}
# Skipping ourselves ({{$node_ip}})
{{- else}}
protocol bgp Node_{{$id}} from bgp_template {
  neighbor {{$data.ip}} as {{$data.as_num}};
  {{- if and (eq $data.as_num $node_as_num) (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}
  rr client;
  rr cluster id {{$node_cluster_id}};
  {{- end}}
}
{{- end}}
{{end}}
{{else}}# No node-specific peers configured.{{end}}
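
# Node-specific peers render exactly like the global peers above, except that
# the HSCLOUD 127.0.0.1 passive special-case only exists in the global loop.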
{{end}}{{/* End of IPv4 enable check */}}