Merge pull request #1282 from GoogleCloudPlatform/bruzz/nva-firewall-mgmt

Added local firewall management (iptables) on the NVA to deal with the COS default deny on inbound connections
simonebruzzechesse 2023-03-27 16:32:56 +02:00 committed by GitHub
commit b6880104d9
4 changed files with 84 additions and 34 deletions

View File

@ -1,8 +1,14 @@
# Google Simple NVA Module
This module allows for the creation of a NVA (Network Virtual Appliance) to be used for experiments and as a stub for future appliances deployment.
The module allows you to create Network Virtual Appliances (NVAs) as a stub for future appliance deployments.
This NVA can be used to interconnect up to 8 VPCs.
These NVAs can be used to interconnect up to 8 VPCs.
The NVAs run [Container-Optimized OS (COS)](https://cloud.google.com/container-optimized-os/docs). COS is a Linux-based OS designed for running containers. By default, it only allows inbound SSH connections. To see the exact host firewall configuration, run `sudo iptables -L -v`. More information is available in the [official documentation](https://cloud.google.com/container-optimized-os/docs/how-to/firewall).
To configure the host firewall, you can either
- use the [open_ports](variables.tf#L84) variable
- for finer-grained control, pass a custom bash script with iptables commands at startup (both approaches are sketched below)
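A minimal sketch of the two options, assuming a placeholder `source` path, a `local.network_interfaces` value defined as in the examples below, and, for brevity, a custom rule passed directly through `run_cmds` instead of a separate script:
```
module "cos-nva" {
  # placeholder source path, adjust to where this module lives in your configuration
  source               = "./fabric/modules/simple-nva"
  enable_health_checks = true
  network_interfaces   = local.network_interfaces
  # open well-known ports via the open_ports variable
  open_ports = {
    tcp = ["80", "443"]
    udp = []
  }
  # or append custom iptables rules for finer-grained control
  run_cmds = [
    "iptables -A INPUT -p tcp -s 10.0.0.0/8 --dport 179 -j ACCEPT"
  ]
}
```
Ports listed in `open_ports`, plus any ports required by enabled FRR daemons, are appended to the host firewall as `iptables -A INPUT ... -j ACCEPT` rules at boot, while `run_cmds` entries are executed verbatim by cloud-init.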
## Examples
@ -65,10 +71,9 @@ module "vm" {
# tftest modules=1 resources=1
```
### Example with advanced routing capabilities
### Example with advanced routing capabilities (FRR)
Find below a sample terraform example for bootstrapping a simple NVA powered by [COS](https://cloud.google.com/container-optimized-os/docs) and running [FRRouting](https://frrouting.org/) container.
Please find below a sample frr.conf file based on the documentation available [here](https://docs.frrouting.org/en/latest/basic.html) for hosting a BGP service with ASN 65001 on FRR container establishing a BGP session with a remote neighbor with IP address 10.128.0.2 and ASN 65002.
The sample code brings up an [FRRouting](https://frrouting.org/) container.
```
# tftest-file id=frr_conf path=./frr.conf
@ -112,7 +117,7 @@ module "cos-nva" {
enable_health_checks = true
network_interfaces = local.network_interfaces
frr_config = { config_file = "./frr.conf", daemons_enabled = ["bgpd"] }
optional_run_cmds = ["ls -l"]
run_cmds = ["ls -l"]
}
module "vm" {
@ -134,6 +139,20 @@ module "vm" {
}
# tftest modules=1 resources=1 files=frr_conf
```
The FRR container is managed as a systemd service. To interact with the service, use the standard systemd commands: `sudo systemctl {start|stop|restart} frr`.
To interact with the FRR CLI, run:
```shell
# get the container ID
CONTAINER_ID=$(sudo docker ps -a -q)
sudo docker exec -it $CONTAINER_ID vtysh
```
Check the FRR running configuration with `show running-config` from vtysh. Always refer to the official documentation for more information on vtysh and other useful commands.
The sample frr.conf file is based on the documentation available [here](https://docs.frrouting.org/en/latest/basic.html). It configures a BGP service with ASN 65001 on the FRR container, establishing a BGP session with a remote neighbor with IP address 10.128.0.2 and ASN 65002. Check the BGP status with `show bgp summary` from vtysh.
<!-- BEGIN TFDOC -->
## Variables
@ -145,7 +164,8 @@ module "vm" {
| [enable_health_checks](variables.tf#L23) | Configures routing to enable responses to health check probes. | <code>bool</code> | | <code>false</code> |
| [files](variables.tf#L29) | Map of extra files to create on the instance, path as key. Owner and permissions will use defaults if null. | <code title="map&#40;object&#40;&#123;&#10; content &#61; string&#10; owner &#61; string&#10; permissions &#61; string&#10;&#125;&#41;&#41;">map&#40;object&#40;&#123;&#8230;&#125;&#41;&#41;</code> | | <code>&#123;&#125;</code> |
| [frr_config](variables.tf#L39) | FRR configuration for container running on the NVA. | <code title="object&#40;&#123;&#10; config_file &#61; string&#10; daemons_enabled &#61; optional&#40;list&#40;string&#41;&#41;&#10;&#125;&#41;">object&#40;&#123;&#8230;&#125;&#41;</code> | | <code>null</code> |
| [optional_run_cmds](variables.tf#L84) | Optional Cloud Init run commands to execute. | <code>list&#40;string&#41;</code> | | <code>&#91;&#93;</code> |
| [open_ports](variables.tf#L84) | Optional firewall ports to open. | <code title="object&#40;&#123;&#10; tcp &#61; list&#40;string&#41;&#10; udp &#61; list&#40;string&#41;&#10;&#125;&#41;">object&#40;&#123;&#8230;&#125;&#41;</code> | | <code title="&#123;&#10; tcp &#61; &#91;&#93;&#10; udp &#61; &#91;&#93;&#10;&#125;">&#123;&#8230;&#125;</code> |
| [run_cmds](variables.tf#L96) | Optional cloud init run commands to execute. | <code>list&#40;string&#41;</code> | | <code>&#91;&#93;</code> |
## Outputs

View File

@ -55,6 +55,12 @@ write_files:
ip route add ${route} via `curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/${interface.number}/gateway -H "Metadata-Flavor:Google"` dev ${interface.name}
%{ endfor ~}
%{ endfor ~}
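# open requested ports on the COS host firewall (user-provided open_ports plus ports required by enabled FRR daemons)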
%{ for port in open_tcp_ports ~}
iptables -A INPUT -p tcp --dport ${port} -j ACCEPT
%{ endfor ~}
%{ for port in open_udp_ports ~}
iptables -A INPUT -p udp --dport ${port} -j ACCEPT
%{ endfor ~}
bootcmd:
- systemctl start node-problem-detector
@ -63,6 +69,6 @@ runcmd:
- systemctl daemon-reload
- systemctl enable routing
- systemctl start routing
%{ for cmd in optional_run_cmds ~}
%{ for cmd in run_cmds ~}
- ${cmd}
%{ endfor ~}

View File

@ -67,29 +67,29 @@ locals {
} : {}
)
_frr_daemons = [
"zebra",
"bgpd",
"ospfd",
"ospf6d",
"ripd",
"ripngd",
"isisd",
"pimd",
"ldpd",
"nhrpd",
"eigrpd",
"babeld",
"sharpd",
"staticd",
"pbrd",
"bfdd",
"fabricd"
]
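# FRR daemons mapped to the TCP/UDP ports opened on the host firewall when the daemon is enabled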
_frr_daemons = {
"zebra" : { tcp = [], udp = [] }
"bgpd" : { tcp = ["179"], udp = [] }
"ospfd" : { tcp = [], udp = [] }
"ospf6d" : { tcp = [], udp = [] }
"ripd" : { tcp = [], udp = ["520"] }
"ripngd" : { tcp = [], udp = ["521"] }
"isisd" : { tcp = [], udp = [] }
"pimd" : { tcp = [], udp = [] }
"ldpd" : { tcp = ["646"], udp = ["646"] }
"nhrpd" : { tcp = [], udp = [] }
"eigrpd" : { tcp = [], udp = [] }
"babeld" : { tcp = [], udp = [] }
"sharpd" : { tcp = [], udp = [] }
"staticd" : { tcp = [], udp = [] }
"pbrd" : { tcp = [], udp = [] }
"bfdd" : { tcp = [], udp = ["3784"] }
"fabricd" : { tcp = [], udp = [] }
}
_frr_daemons_enabled = try(
{
for daemon in local._frr_daemons :
for daemon in keys(local._frr_daemons) :
"${daemon}_enabled" => contains(var.frr_config.daemons_enabled, daemon) ? "yes" : "no"
}, {})
@ -103,22 +103,34 @@ locals {
}
]
_optional_run_cmds = (
_run_cmds = (
try(var.frr_config != null, false)
? concat(["systemctl start frr"], var.optional_run_cmds)
: var.optional_run_cmds
? concat(["systemctl start frr"], var.run_cmds)
: var.run_cmds
)
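# TCP ports to open on the host firewall: ports of enabled FRR daemons plus var.open_ports.tcp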
_tcp_ports = concat(flatten(try(
[
for daemon, ports in local._frr_daemons : contains(var.frr_config.daemons_enabled, daemon) ? ports.tcp : []
], [])), var.open_ports.tcp)
_template = (
var.cloud_config == null
? "${path.module}/cloud-config.yaml"
: var.cloud_config
)
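# UDP ports to open on the host firewall: ports of enabled FRR daemons plus var.open_ports.udp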
_udp_ports = concat(flatten(try(
[
for daemon, ports in local._frr_daemons : contains(var.frr_config.daemons_enabled, daemon) ? ports.udp : []
], [])), var.open_ports.udp)
cloud_config = templatefile(local._template, {
enable_health_checks = var.enable_health_checks
files = local._files
network_interfaces = local._network_interfaces
optional_run_cmds = local._optional_run_cmds
open_tcp_ports = local._tcp_ports
open_udp_ports = local._udp_ports
run_cmds = local._run_cmds
})
}

View File

@ -81,8 +81,20 @@ variable "network_interfaces" {
}))
}
variable "optional_run_cmds" {
description = "Optional Cloud Init run commands to execute."
variable "open_ports" {
description = "Optional firewall ports to open."
type = object({
tcp = list(string)
udp = list(string)
})
default = {
tcp = []
udp = []
}
}
variable "run_cmds" {
description = "Optional cloud init run commands to execute."
type = list(string)
default = []
}