Merge branch 'master' into vpc-sc-02

This commit is contained in:
lcaggio 2021-05-18 09:52:45 +02:00 committed by GitHub
commit 585b3a79ee
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 1558 additions and 66 deletions

View File

@ -4,8 +4,16 @@ All notable changes to this project will be documented in this file.
## [Unreleased]
- **incompatible change** updated resource name for `google_dns_policy` on the `net-vpc` module
- added support for VPC-SC Ingress Egress policies on the `vpc-sc` module
## [4.8.0] - 2021-05-12
- added support for `CORS` to the `gcs` module
- added support for VPC-SC Ingress Egress policies
- make cluster creation optional in the Shared VPC example
- make service account creation optional in `iam-service-account` module
- new `third-party-solutions` top-level folder with initial `openshift` example
- added support for DNS Policies to the `net-vpc` module
## [4.7.0] - 2021-04-21
@ -302,7 +310,8 @@ All notable changes to this project will be documented in this file.
- merge development branch with suite of new modules and end-to-end examples
[Unreleased]: https://github.com/terraform-google-modules/cloud-foundation-fabric/compare/v4.7.0...HEAD
[Unreleased]: https://github.com/terraform-google-modules/cloud-foundation-fabric/compare/v4.8.0...HEAD
[4.8.0]: https://github.com/terraform-google-modules/cloud-foundation-fabric/compare/v4.7.0...v4.8.0
[4.7.0]: https://github.com/terraform-google-modules/cloud-foundation-fabric/compare/v4.6.1...v4.7.0
[4.6.1]: https://github.com/terraform-google-modules/cloud-foundation-fabric/compare/v4.6.0...v4.6.1
[4.6.0]: https://github.com/terraform-google-modules/cloud-foundation-fabric/compare/v4.5.1...v4.6.0

View File

@ -20,6 +20,7 @@ Currently available examples:
- **networking** - [hub and spoke via peering](./networking/hub-and-spoke-peering/), [hub and spoke via VPN](./networking/hub-and-spoke-vpn/), [DNS and Google Private Access for on-premises](./networking/onprem-google-access-dns/), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [ILB as next hop](./networking/ilb-next-hop)
- **data solutions** - [GCE/GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms/), [Cloud Storage to Bigquery with Cloud Dataflow](./data-solutions/gcs-to-bq-with-dataflow/)
- **cloud operations** - [Resource tracking and remediation via Cloud Asset feeds](./cloud-operations/asset-inventory-feed-remediation), [Granular Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Granular Cloud DNS IAM for Shared VPC](./cloud-operations/dns-shared-vpc), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq)
- **third party solutions** - [OpenShift cluster on Shared VPC](./third-party-solutions/openshift)
For more information see the README files in the [foundations](./foundations/), [networking](./networking/), [data solutions](./data-solutions/) and [cloud operations](./cloud-operations/) folders.

View File

@ -41,6 +41,7 @@ module "myproject-default-service-accounts" {
| *iam_project_roles* | Project roles granted to the service account, by project id. | <code title="map&#40;list&#40;string&#41;&#41;">map(list(string))</code> | | <code title="">{}</code> |
| *iam_storage_roles* | Storage roles granted to the service account, by bucket name. | <code title="map&#40;list&#40;string&#41;&#41;">map(list(string))</code> | | <code title="">{}</code> |
| *prefix* | Prefix applied to service account names. | <code title="">string</code> | | <code title="">null</code> |
| *service_account_create* | Create service account. When set to false, uses a data source to reference an existing service account. | <code title="">bool</code> | | <code title="">true</code> |
## Outputs

View File

@ -57,10 +57,23 @@ locals {
: map("", null)
, {})
prefix = var.prefix != null ? "${var.prefix}-" : ""
resource_iam_email = "serviceAccount:${google_service_account.service_account.email}"
resource_iam_email = "serviceAccount:${local.service_account.email}"
service_account = (
var.service_account_create
? try(google_service_account.service_account.0, null)
: try(data.google_service_account.service_account.0, null)
)
}
data "google_service_account" "service_account" {
count = var.service_account_create ? 0 : 1
project = var.project_id
account_id = "${local.prefix}${var.name}"
}
resource "google_service_account" "service_account" {
count = var.service_account_create ? 1 : 0
project = var.project_id
account_id = "${local.prefix}${var.name}"
display_name = var.display_name
@ -68,12 +81,12 @@ resource "google_service_account" "service_account" {
resource "google_service_account_key" "key" {
for_each = var.generate_key ? { 1 = 1 } : {}
service_account_id = google_service_account.service_account.email
service_account_id = local.service_account.email
}
resource "google_service_account_iam_binding" "roles" {
for_each = var.iam
service_account_id = google_service_account.service_account.name
service_account_id = local.service_account.name
role = each.key
members = each.value
}

View File

@ -16,12 +16,12 @@
output "service_account" {
description = "Service account resource."
value = google_service_account.service_account
value = local.service_account
}
output "email" {
description = "Service account email."
value = google_service_account.service_account.email
value = local.service_account.email
}
output "iam_email" {

View File

@ -77,3 +77,9 @@ variable "project_id" {
description = "Project id where service account will be created."
type = string
}
variable "service_account_create" {
description = "Create service account. When set to false, uses a data source to reference an existing service account."
type = bool
default = true
}

View File

@ -141,6 +141,33 @@ module "vpc" {
# tftest:modules=1:resources=4
```
### DNS Policies
```hcl
module "vpc" {
source = "./modules/net-vpc"
project_id = "my-project"
name = "my-network"
dns_policy = {
inbound = true
logging = false
outbound = {
private_ns = ["10.0.0.1"]
public_ns = ["8.8.8.8"]
}
}
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
secondary_ip_range = {}
}
]
}
# tftest:modules=1:resources=3
```
<!-- BEGIN TFDOC -->
## Variables
@ -151,6 +178,7 @@ module "vpc" {
| *auto_create_subnetworks* | Set to true to create an auto mode subnet, defaults to custom mode. | <code title="">bool</code> | | <code title="">false</code> |
| *delete_default_routes_on_create* | Set to true to delete the default routes at creation time. | <code title="">bool</code> | | <code title="">false</code> |
| *description* | An optional description of this resource (triggers recreation on change). | <code title="">string</code> | | <code title="">Terraform-managed.</code> |
| *dns_policy* | DNS policy setup for the VPC. | <code title="object&#40;&#123;&#10;inbound &#61; bool&#10;logging &#61; bool&#10;outbound &#61; object&#40;&#123;&#10;private_ns &#61; list&#40;string&#41;&#10;public_ns &#61; list&#40;string&#41;&#10;&#125;&#41;&#10;&#125;&#41;">object({...})</code> | | <code title="">null</code> |
| *iam* | Subnet IAM bindings in {REGION/NAME => {ROLE => [MEMBERS]} format. | <code title="map&#40;map&#40;list&#40;string&#41;&#41;&#41;">map(map(list(string)))</code> | | <code title="">{}</code> |
| *log_config_defaults* | Default configuration for flow logs when enabled. | <code title="object&#40;&#123;&#10;aggregation_interval &#61; string&#10;flow_sampling &#61; number&#10;metadata &#61; string&#10;&#125;&#41;">object({...})</code> | | <code title="&#123;&#10;aggregation_interval &#61; &#34;INTERVAL_5_SEC&#34;&#10;flow_sampling &#61; 0.5&#10;metadata &#61; &#34;INCLUDE_ALL_METADATA&#34;&#10;&#125;">...</code> |
| *log_configs* | Map keyed by subnet 'region/name' of optional configurations for flow logs when enabled. | <code title="map&#40;map&#40;string&#41;&#41;">map(map(string))</code> | | <code title="">{}</code> |

View File

@ -239,6 +239,38 @@ resource "google_compute_global_address" "psn_range" {
network = local.network.id
}
# Optional Cloud DNS server policy, created only when var.dns_policy is set.
resource "google_dns_policy" "default" {
  count                     = var.dns_policy == null ? 0 : 1
  enable_inbound_forwarding = var.dns_policy.inbound
  enable_logging            = var.dns_policy.logging
  name                      = var.name
  project                   = var.project_id
  networks {
    network_url = local.network.id
  }
  # Outbound forwarding configuration, rendered only when the outbound
  # attribute is non-null.
  dynamic "alternative_name_server_config" {
    for_each = var.dns_policy.outbound == null ? [] : [1]
    content {
      # Name servers reached via the "private" forwarding path.
      dynamic "target_name_servers" {
        for_each = toset(var.dns_policy.outbound.private_ns)
        iterator = ns
        content {
          ipv4_address    = ns.key
          forwarding_path = "private"
        }
      }
      # Name servers using the provider's default forwarding path.
      dynamic "target_name_servers" {
        for_each = toset(var.dns_policy.outbound.public_ns)
        iterator = ns
        content {
          ipv4_address = ns.key
        }
      }
    }
  }
}
resource "google_service_networking_connection" "psn_connection" {
count = var.private_service_networking_range == null ? 0 : 1
network = local.network.id

View File

@ -32,6 +32,18 @@ variable "description" {
default = "Terraform-managed."
}
variable "dns_policy" {
  # A description was missing here, which surfaced as "None" in the
  # tfdoc-generated README table.
  description = "DNS policy setup for the VPC."
  type = object({
    inbound = bool
    logging = bool
    outbound = object({
      private_ns = list(string)
      public_ns  = list(string)
    })
  })
  default = null
}
variable "iam" {
description = "Subnet IAM bindings in {REGION/NAME => {ROLE => [MEMBERS]} format."
type = map(map(list(string)))
@ -84,6 +96,19 @@ variable "peering_create_remote_end" {
default = true
}
variable "private_service_networking_range" {
  # Fixed typo: "RFC1919" -> "RFC1918" (consistent with the validation
  # error message below).
  description = "RFC1918 CIDR range used for Google services that support private service networking."
  type        = string
  default     = null
  validation {
    condition = (
      var.private_service_networking_range == null ||
      can(cidrnetmask(var.private_service_networking_range))
    )
    error_message = "Specify a valid RFC1918 CIDR range for private service networking."
  }
}
variable "project_id" {
description = "The ID of the project where this VPC will be created"
type = string
@ -159,16 +184,3 @@ variable "vpc_create" {
type = bool
default = true
}
variable "private_service_networking_range" {
description = "RFC1918 CIDR range used for Google services that support private service networking."
type = string
default = null
validation {
condition = (
var.private_service_networking_range == null ||
can(cidrnetmask(var.private_service_networking_range))
)
error_message = "Specify a valid RFC1918 CIDR range for private service networking."
}
}

View File

@ -43,7 +43,7 @@ kubectl get all
The example configures the peering with the GKE master VPC to export routes for you, so that VPN routes are passed through the peering. You can disable this by hand in the console or by editing the `peering_config` variable in the cluster module, to test non-working configurations or switch to using the [GKE proxy](https://cloud.google.com/solutions/creating-kubernetes-engine-private-clusters-with-net-proxies).
### Export routes via Terraform
### Export routes via Terraform (recommended)
Change the GKE cluster module and add a new variable after `private_cluster_config`:
@ -56,9 +56,9 @@ Change the GKE cluster module and add a new variable after `private_cluster_conf
If you added the variable after applying, simply apply Terraform again.
### Export routes via gcloud
### Export routes via gcloud (alternative)
The peering has a name like `gke-xxxxxxxxxxxxxxxxxxxx-xxxx-xxxx-peer`, you can edit it in the Cloud Console from the *VPC network peering* page or using `gcloud`:
If you prefer to use `gcloud` to export routes on the peering, first identify the peering (it has a name like `gke-xxxxxxxxxxxxxxxxxxxx-xxxx-xxxx-peer`) in the Cloud Console from the *VPC network peering* page, or using `gcloud`, then configure it to export routes:
```
gcloud compute networks peerings list
@ -67,6 +67,8 @@ gcloud compute networks peerings update [peering name from above] \
--network spoke-2 --export-custom-routes
```
### Test routes
Then connect via SSH to the spoke 1 instance and run the same commands you ran on the spoke 2 instance above, you should be able to run `kubectl` commands against the cluster. To test the default situation with no supporting VPN, just comment out the two VPN modules in `main.tf` and run `terraform apply` to bring down the VPN gateways and tunnels. GKE should only become accessible from spoke 2.
## Operational considerations

View File

@ -33,7 +33,7 @@ module "project" {
source = "../../modules/project"
project_create = var.project_create != null
billing_account = try(var.project_create.billing_account, null)
oslogin = try(var.project_create.oslogin, null)
oslogin = try(var.project_create.oslogin, false)
parent = try(var.project_create.parent, null)
name = var.project_id
services = [

View File

@ -1,6 +1,10 @@
# Shared VPC with GKE example
# Shared VPC with optional GKE cluster
This sample creates a basic [Shared VPC](https://cloud.google.com/vpc/docs/shared-vpc) setup using one host project and two service projects, each with a specific subnet in the shared VPC. The setup also includes the specific IAM-level configurations needed for [GKE on Shared VPC](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-shared-vpc) to enable cluster creation in one of the two service projects.
This sample creates a basic [Shared VPC](https://cloud.google.com/vpc/docs/shared-vpc) setup using one host project and two service projects, each with a specific subnet in the shared VPC.
The setup also includes the specific IAM-level configurations needed for [GKE on Shared VPC](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-shared-vpc) in one of the two service projects, and optionally creates a cluster with a single nodepool.
If you only need a basic Shared VPC, or prefer creating a cluster manually, set the `cluster_create` variable to `false`.
The sample has been purposefully kept simple so that it can be used as a basis for different Shared VPC configurations. This is the high level diagram:
@ -46,6 +50,7 @@ There's a minor glitch that can surface running `terraform destroy`, where the s
| billing_account_id | Billing account id used as default for new projects. | <code title="">string</code> | ✓ | |
| prefix | Prefix used for resources that need unique names. | <code title="">string</code> | ✓ | |
| root_node | Hierarchy node where projects will be created, 'organizations/org_id' or 'folders/folder_id'. | <code title="">string</code> | ✓ | |
| *cluster_create* | Create GKE cluster and nodepool. | <code title="">bool</code> | | <code title="">true</code> |
| *ip_ranges* | Subnet IP CIDR ranges. | <code title="map&#40;string&#41;">map(string)</code> | | <code title="&#123;&#10;gce &#61; &#34;10.0.16.0&#47;24&#34;&#10;gke &#61; &#34;10.0.32.0&#47;24&#34;&#10;&#125;">...</code> |
| *ip_secondary_ranges* | Secondary IP CIDR ranges. | <code title="map&#40;string&#41;">map(string)</code> | | <code title="&#123;&#10;gke-pods &#61; &#34;10.128.0.0&#47;18&#34;&#10;gke-services &#61; &#34;172.16.0.0&#47;24&#34;&#10;&#125;">...</code> |
| *owners_gce* | GCE project owners, in IAM format. | <code title="list&#40;string&#41;">list(string)</code> | | <code title="">[]</code> |

View File

@ -70,12 +70,18 @@ module "project-svc-gke" {
attach = true
host_project = module.project-host.project_id
}
iam = {
"roles/container.developer" = [module.vm-bastion.service_account_iam_email],
"roles/logging.logWriter" = [module.cluster-1-nodepool-1.service_account_iam_email],
"roles/monitoring.metricWriter" = [module.cluster-1-nodepool-1.service_account_iam_email],
"roles/owner" = var.owners_gke
}
iam = merge(
{
"roles/container.developer" = [module.vm-bastion.service_account_iam_email]
"roles/owner" = var.owners_gke
},
var.cluster_create
? {
"roles/logging.logWriter" = [module.cluster-1-nodepool-1.0.service_account_iam_email]
"roles/monitoring.metricWriter" = [module.cluster-1-nodepool-1.0.service_account_iam_email]
}
: {}
)
}
################################################################################
@ -193,6 +199,7 @@ module "vm-bastion" {
module "cluster-1" {
source = "../../modules/gke-cluster"
count = var.cluster_create ? 1 : 0
name = "cluster-1"
project_id = module.project-svc-gke.project_id
location = "${var.region}-b"
@ -217,9 +224,10 @@ module "cluster-1" {
module "cluster-1-nodepool-1" {
source = "../../modules/gke-nodepool"
count = var.cluster_create ? 1 : 0
name = "nodepool-1"
project_id = module.project-svc-gke.project_id
location = module.cluster-1.location
cluster_name = module.cluster-1.name
location = module.cluster-1.0.location
cluster_name = module.cluster-1.0.name
node_service_account_create = true
}

View File

@ -14,9 +14,11 @@
output "gke_clusters" {
description = "GKE clusters information."
value = {
cluster-1 = module.cluster-1.endpoint
}
value = (
var.cluster_create
? { cluster-1 = module.cluster-1.0.endpoint }
: {}
)
}
output "projects" {

View File

@ -17,6 +17,30 @@ variable "billing_account_id" {
type = string
}
variable "cluster_create" {
description = "Create GKE cluster and nodepool."
type = bool
default = true
}
variable "ip_ranges" {
description = "Subnet IP CIDR ranges."
type = map(string)
default = {
gce = "10.0.16.0/24"
gke = "10.0.32.0/24"
}
}
variable "ip_secondary_ranges" {
description = "Secondary IP CIDR ranges."
type = map(string)
default = {
gke-pods = "10.128.0.0/18"
gke-services = "172.16.0.0/24"
}
}
variable "owners_gce" {
description = "GCE project owners, in IAM format."
type = list(string)
@ -40,35 +64,6 @@ variable "prefix" {
type = string
}
variable "region" {
description = "Region used."
type = string
default = "europe-west1"
}
variable "root_node" {
description = "Hierarchy node where projects will be created, 'organizations/org_id' or 'folders/folder_id'."
type = string
}
variable "ip_ranges" {
description = "Subnet IP CIDR ranges."
type = map(string)
default = {
gce = "10.0.16.0/24"
gke = "10.0.32.0/24"
}
}
variable "ip_secondary_ranges" {
description = "Secondary IP CIDR ranges."
type = map(string)
default = {
gke-pods = "10.128.0.0/18"
gke-services = "172.16.0.0/24"
}
}
variable "private_service_ranges" {
description = "Private service IP CIDR ranges."
type = map(string)
@ -85,3 +80,14 @@ variable "project_services" {
"stackdriver.googleapis.com",
]
}
variable "region" {
description = "Region used."
type = string
default = "europe-west1"
}
variable "root_node" {
description = "Hierarchy node where projects will be created, 'organizations/org_id' or 'folders/folder_id'."
type = string
}

View File

@ -0,0 +1,9 @@
# Third Party Solutions
The examples in this folder show how to automate installation of specific third party products on GCP, following typical best practices.
## Examples
### OpenShift cluster bootstrap on Shared VPC
<a href="./openshift/" title="OpenShift bootstrap example"><img src="./openshift/diagram.png" align="left" width="280px"></a> This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks.

View File

@ -0,0 +1,237 @@
# OpenShift on GCP user-provisioned infrastructure
This example shows how to quickly install OpenShift 4.7 on GCP user-provided infrastructure (UPI), combining [different](https://docs.openshift.com/container-platform/4.7/installing/installing_gcp/installing-gcp-user-infra-vpc.html) official [installation](https://docs.openshift.com/container-platform/4.7/installing/installing_gcp/installing-restricted-networks-gcp.html) documents into a single setup, that uses a Python script for the initial configuration via the `openshift-install` command, and a set of Terraform files to bootstrap the cluster.
Its main features are:
- remove some dependencies (eg public DNS zone) by generating the yaml file used to seed the install process
- automate the edits required to manifest files during the install process
- use Terraform to bring up the bootstrap and control plane resources
- tightly couple the install configuration and bootstrap phases via a single set of Terraform variables
- allow worker management via native OpenShift machine sets
Several GCP features and best practices are directly supported:
- internal-only clusters with no dependency on public DNS zones or load balancers
- Shared VPC support with optional separate subnets for masters, workers and load balancers
- optional encryption keys for instance disks
- optional proxy settings
The example uses a Python script to drive install configuration, and a set of Terraform files to bootstrap the cluster. The resulting infrastructure is shown in this diagram, which includes the prerequisite resources created by this example with blue icons, and the optional resources provided externally with grey icons:
![High-level diagram](diagram.png "High-level diagram")
## Prerequisites
### OpenShift commands and pull secret
From the [OpenShift GCP UPI documentation](https://cloud.redhat.com/openshift/install/gcp/user-provisioned), download
- the Installer CLI
- the Command Line CLI
- your pull secret
*Optional:* if you want to use a specific GCP RHCOS image, download it from the [RedHat library](https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.7/4.7.7/), then import it as a GCE image and configure the relevant Terraform variable before bootstrap.
### GCP projects and VPC
This example is designed to fit into enterprise GCP foundations, and assumes a Shared VPC and its associated host and service projects are already available.
If you don't have them yet, you can quickly bring them up using [our example](../../networking/shared-vpc-gke). Just remember to set the `cluster_create` variable to `false` in the Shared VPC example variables, to skip creating the associated GKE cluster.
There are several services that need to be enabled in the GCP projects. In the host project, make sure `dns.googleapis.com` is enabled. In the service project, use the [RedHat reference](https://docs.openshift.com/container-platform/4.7/installing/installing_gcp/installing-restricted-networks-gcp.html#installation-gcp-enabling-api-services_installing-restricted-networks-gcp) or just enable this list:
- `cloudapis.googleapis.com`
- `cloudresourcemanager.googleapis.com`
- `compute.googleapis.com`
- `dns.googleapis.com`
- `iamcredentials.googleapis.com`
- `iam.googleapis.com`
- `servicemanagement.googleapis.com`
- `serviceusage.googleapis.com`
- `storage-api.googleapis.com`
- `storage-component.googleapis.com`
Or if you're lazy, just wait for error messages to pop up during `terraform apply`, follow the link in the error message to enable the missing service, then re-run `apply`.
### Python environment
A few Python libraries are needed by the script used to configure the installation files. The simplest option is to create a new virtualenv and install via the provided requirements file:
```bash
python3 -m venv ~/ocp-venv
. ~/ocp-venv/bin/activate
pip install -r requirements.txt
```
You can then check if the provided Python cli works:
```bash
./prepare.py --help
Usage: prepare.py [OPTIONS] COMMAND [ARGS]...
[...]
Options:
--tfdir PATH Terraform folder.
--tfvars TEXT Terraform vars file, relative to Terraform
[...]
```
### Install service account
The OpenShift install requires a privileged service account and the associated key, which is embedded as a secret in the bootstrap files, and used to create the GCP resources directly managed by the OpenShift controllers.
The secret can be removed from the cluster after bootstrap, as individual service accounts with lesser privileges are created during the bootstrap phase. Refer to the [Mint Mode](https://docs.openshift.com/container-platform/4.7/authentication/managing_cloud_provider_credentials/cco-mode-mint.html#mint-mode-permissions-gcp) and [Cloud Credential](https://docs.openshift.com/container-platform/4.6/operators/operator-reference.html#cloud-credential-operator_red-hat-operators) documentation for more details.
The simplest way to get this service account credentials with the right permissions is to create it via `gcloud`, and assign it `owner` role on the service project:
```bash
# adjust with your project id and credentials path
export OCP_SVC_PRJ=my-ocp-svc-project-id
export OCP_DIR=~/ocp
gcloud config set project $OCP_SVC_PRJ
gcloud iam service-accounts create ocp-installer
export OCP_SA=$(\
gcloud iam service-accounts list \
--filter ocp-installer --format 'value(email)' \
)
gcloud projects add-iam-policy-binding $OCP_SVC_PRJ \
--role roles/owner --member "serviceAccount:$OCP_SA"
gcloud iam service-accounts keys create $OCP_DIR/credentials.json \
--iam-account $OCP_SA
```
If you need more fine-grained control on the service account's permissions instead, refer to the Mint Mode documentation linked above for the individual roles needed.
## Installation
As mentioned above, the installation flow is split in two parts:
- generating the configuration files used to bootstrap the cluster, via the included Python script driving the `openshift-install` CLI
- creating the bootstrap and control plane resources on GCP
Both steps use a common set of variables defined in Terraform, that set the basic attributes of your GCP infrastructure (projects, VPC), configure paths for prerequisite commands and resources (`openshift-install`, pull secret, etc.), define basic cluster attributes (name, domain), and allow enabling optional features (KMS encryption, proxy).
### Configuring variables
Variable configuration is best done in a `.tfvars` file, but can also be done directly in the `terraform.tfvars` file if needed. Variables names and descriptions should be self-explanatory, here are a few extra things you might want to be aware of.
<dl>
<dt><code>allowed_ranges</code></dt>
<dd>IP CIDR ranges included in the firewall rules for SSH and API server.</dd>
<dt><code>domain</code></dt>
<dd>Domain name for the parent zone, under which a zone matching the <code>cluster_name</code> variable will be created.</dd>
<dt><code>disk_encryption_key</code></dt>
<dd>Set to <code>null</code> if you are not using CMEK keys for disk encryption. If you are using it, ensure the GCE robot account has permissions on the key.</dd>
<dt><code>fs_paths</code></dt>
<dd>Filesystem paths for the external dependencies. Home path expansion is supported. The <code>config_dir</code> path is where generated ignition files will be created. Ensure it's empty (including hidden files) before starting the installation process.</dd>
<dt><code>host_project</code></dt>
<dd>If you don't need installing in different subnets, pass the same subnet names for the default, masters, and workers subnets.</dd>
<dt><code>install_config_params</code></dt>
<dd>The `machine` range should match addresses used for nodes.</dd>
<dt><code>post_bootstrap_config</code></dt>
<dd>Set to `null` until bootstrap completion, then refer to the post-bootstrap instructions below.</dd>
</dl>
### Generating ignition files
Once all variables match your setup, you can generate the ignition config files that will be used to bootstrap the control plane. Make sure the directory in the `fs_paths.config_dir` variable is empty before running the following command, including hidden files.
```bash
./prepare.py --tfvars my-vars.tfvars
[output]
```
The directory specified in the `fs_paths.config_dir` variable should now contain a set of ignition files, and the credentials you will use to access the cluster.
If you need to preserve the intermediate files generated by the OpenShift installer (eg `install-config.yaml` or the manifests files), check the Python script's help and run each of its individual subcommands in order.
### Bringing up the cluster
Once you have ignition files ready, change to the `tf` folder and apply:
```bash
cd tf
terraform init
terraform apply
```
If you want to preserve state (which is always a good idea), configure a [GCS backend](https://www.terraform.io/docs/language/settings/backends/gcs.html) as you would do for any other Terraform GCP setup.
### Waiting for bootstrap to complete
You have two ways of checking the bootstrap process.
The first one is from the bootstrap instance itself, by looking at its logs. The `bootstrap-ssh` Terraform output shows the command you need to use to log in. Once logged in, execute this to tail logs:
```bash
journalctl -b -f -u release-image.service -u bootkube.service
```
Wait until logs show success:
```log
May 11 07:16:38 ocp-fabric-1-px44j-b.c.tf-playground-svpc-openshift.internal bootkube.sh[2346]: bootkube.service complete
May 11 07:16:38 ocp-fabric-1-px44j-b.c.tf-playground-svpc-openshift.internal systemd[1]: bootkube.service: Succeeded.
```
The second way has less details but is more terse: copy the contents of the `fs_paths.config_dir` folder to your bastion host, then use the `openshift-install` command to wait for bootstrap completion:
```bash
# edit commands to match your paths and names
gcloud compute scp --recurse
~/Desktop/dev/openshift/config bastion:
gcloud compute ssh bastion
openshift-install wait-for bootstrap-complete --dir config/
```
The command exits successfully once bootstrap has completed:
```log
INFO Waiting up to 20m0s for the Kubernetes API at https://api.ocp-fabric-1.ocp.ludomagno.net:6443...
INFO API v1.20.0+c8905da up
INFO Waiting up to 30m0s for bootstrapping to complete...
INFO It is now safe to remove the bootstrap resources
INFO Time elapsed: 0s
```
### Post-bootstrap tasks
Once bootstrap has completed, use the credentials in the OpenShift configuration folder to look for the name of the generated service account for the machine operator:
```bash
export KUBECONFIG=config/auth/kubeconfig
oc get CredentialsRequest openshift-machine-api-gcp \
-n openshift-cloud-credential-operator \
-o jsonpath='{.status.providerStatus.serviceAccountID}{"\n"}'
```
Take the resulting name, and put it into the `post_bootstrap_config.machine_op_sa_prefix` Terraform variable, then run `terraform apply`. This will remove the bootstrap resources which are no longer needed, and grant the correct role on the Shared VPC to the service account.
You're now ready to scale the machinesets and provision workers:
```bash
export KUBECONFIG=config/auth/kubeconfig
for m in $(oc get machineset -A -o jsonpath='{..metadata.name}'); do
oc scale machineset $m -n openshift-machine-api --replicas 1;
done
```
Then check that machines have been provisioned:
```bash
oc get machine -A
```
### Confirming the cluster is ready
After a little while, all OpenShift operators should finish their configuration. You can confirm it by checking their status:
```bash
oc get clusteroperators
```

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View File

@ -0,0 +1,39 @@
# skip boilerplate check
apiVersion: v1
baseDomain: ""
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform:
gcp:
osDisk:
diskType: pd-ssd
diskSizeGB: 0
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 3
metadata:
creationTimestamp: null
name: ""
networking:
clusterNetwork:
- cidr: ""
hostPrefix: 23
machineNetwork:
- cidr: ""
networkType: OpenShiftSDN
serviceNetwork:
- ""
platform:
gcp:
projectID: ""
region: ""
publish: Internal
pullSecret: ""
sshKey: |
xxx

View File

@ -0,0 +1,290 @@
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Prepare OCP installation files for UPI installation on GCP.
This module helps generating installation files for OpenShift on GCP with User
Provided Infrastructure, leveraging variables set in the accompanying Terraform
files, that create the infrastructure for a cluster.
It helps supporting features like Shared VPC, CMEK encryption for disks, etc.
'''
import glob
import logging
import os
import pathlib
import re
import subprocess
import sys
import click
import hcl
from ruamel import yaml
__author__ = 'ludomagno@google.com'
__version__ = '1.0'
class Error(Exception):
  'Module-level error raised for user-facing failures and caught by the CLI.'
  pass
def _parse_tfvars(tfvars=None, tfdir=None):
  'Parse vars and tfvars files and return variables.'
  logging.info('parsing tf variables')
  variables = {}
  try:
    # variable defaults come from variables.tf, overrides from the tfvars file
    with open(os.path.join(tfdir, 'variables.tf')) as fp:
      declared = hcl.load(fp)['variable']
      variables = {name: spec.get('default') for name, spec in declared.items()}
    if tfvars:
      with open(os.path.join(tfdir, tfvars)) as fp:
        variables.update(hcl.load(fp))
  except (KeyError, ValueError) as e:
    raise Error(f'Wrong variable files syntax: {e}')
  except (IOError, OSError) as e:
    raise Error(f'Cannot open variable files: {e}')
  # every variable except post_bootstrap_config must have a value
  for name, value in variables.items():
    if name != 'post_bootstrap_config' and value is None:
      raise Error(f'Terraform variable {name} not set.')
  return variables
def _check_convert_paths(**paths):
'Return dictionary of path objects, check they point to existing resources.'
logging.info('checking paths')
result = {}
for k, v in paths.items():
p = pathlib.Path(v).expanduser()
if not p.exists():
raise Error(f'Missing file/dir \'{p}\'.')
result[k] = p
return result
def _run_installer(cmdline, env=None):
'Run command and catch errors.'
logging.info(f'running command {" ".join(cmdline)}')
try:
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env or {})
except subprocess.CalledProcessError as e:
raise Error(f'Error running command: {e.output}')
out, err = p.communicate()
out = out.decode('utf-8', errors='ignore')
err = err.decode('utf-8', errors='ignore')
retcode = p.returncode
if retcode == 1:
raise Error(f'Return error from command ({retcode}): {out} {err}')
@click.group(invoke_without_command=True,
             help=f'{__doc__}\nWith no command, run through all stages.')
@click.option('--tfdir', type=click.Path(exists=True), default='./tf',
              help='Terraform folder.')
@click.option('--tfvars',
              help='Terraform vars file, relative to Terraform folder.')
@click.option('-v', '--verbosity', default='INFO',
              type=click.Choice(
                  ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']),
              help='Verbosity level (logging constant).')
@click.pass_context
def cli(ctx=None, credentials=None, tfdir=None, tfvars=None, verbosity='INFO'):
  'Program entry point.'
  # NOTE(review): `credentials` has no matching click option and is never
  # read here — presumably a leftover from an earlier revision; confirm.
  logging.basicConfig(level=getattr(logging, verbosity))
  logging.info('program starting')
  ctx.ensure_object(dict)
  try:
    # parse Terraform variables once and stash them on the click context,
    # so every subcommand can reuse them
    tfvars_ = _parse_tfvars(tfvars, tfdir)
    ctx.obj['tfvars'] = tfvars_
    ctx.obj['paths'] = _check_convert_paths(**tfvars_['fs_paths'])
  except Error as e:
    print(f'Error: {e.args[0]}')
    sys.exit(1)
  if ctx.invoked_subcommand is None:
    # no explicit subcommand: run the full pipeline in order
    commands = ['install-config', 'manifests',
                'manifests-edit', 'ignition-configs']
  else:
    commands = [ctx.invoked_subcommand]
  try:
    for c in commands:
      ctx.invoke(ctx.command.commands[c])
  except Error as e:
    print(e)
    sys.exit(1)
  sys.exit(0)
@cli.command(help='Generate ignition files from manifests.')
@click.pass_context
def ignition_configs(ctx=None):
  'Create ignition config files from manifests by running the installer.'
  logging.info('generating ignition config files')
  cmdline = [
      str(ctx.obj['paths']['openshift_install']),
      'create', 'ignition-configs',
      '--dir', str(ctx.obj['paths']['config_dir'])
  ]
  # subprocess requires string env values; 'credentials' is a pathlib.Path
  # (see _check_convert_paths), so convert it explicitly
  env = {'GOOGLE_CREDENTIALS': str(ctx.obj['paths']['credentials'])}
  _run_installer(cmdline, env)
@cli.command(help='Generate install config from tfvars file.')
@click.pass_context
def install_config(ctx=None):
  '''Create install-config.yaml from the template and Terraform variables.

  Loads install-config.tpl.yml, fills in domain/cluster/network/disk values,
  optional CMEK and proxy sections, the pull secret and SSH key, then writes
  the result to <config_dir>/install-config.yaml.

  Raises:
    Error: on any file or YAML parsing/dumping failure.
  '''
  logging.info('generating install config')
  y = yaml.YAML()
  try:
    with open('install-config.tpl.yml') as f:
      data = y.load(f)
  except (IOError, OSError) as e:
    raise Error(f'Cannot open install-config template: {e}')
  except yaml.YAMLError as e:
    raise Error(f'Parsing error in install-config template: {e}')
  # renamed from `vars` to avoid shadowing the builtin
  tf_vars = ctx.obj['tfvars']
  paths = ctx.obj['paths']
  vars_key = tf_vars['disk_encryption_key']
  vars_net = tf_vars['install_config_params']['network']
  vars_proxy = tf_vars['install_config_params']['proxy']
  data_disk = data['compute'][0]['platform']['gcp']['osDisk']
  data['baseDomain'] = tf_vars['domain']
  data['metadata']['name'] = tf_vars['cluster_name']
  data['platform']['gcp']['projectID'] = tf_vars['service_project']['project_id']
  data['platform']['gcp']['region'] = tf_vars['region']
  data_disk['diskSizeGB'] = int(tf_vars['install_config_params']['disk_size'])
  if vars_key and vars_key != 'null':
    # ruamel CommentedMap.insert(pos, key, value): append the CMEK block
    data_disk.insert(len(data_disk), 'encryptionKey', {'kmsKey': {
        'projectID': vars_key['project_id'],
        'keyRing': vars_key['keyring'],
        'location': vars_key['location'],
        'name': vars_key['name']
    }})
  data['networking']['clusterNetwork'][0]['cidr'] = vars_net['cluster']
  data['networking']['clusterNetwork'][0]['hostPrefix'] = vars_net['host_prefix']
  data['networking']['machineNetwork'][0]['cidr'] = vars_net['machine']
  data['networking']['serviceNetwork'][0] = vars_net['service']
  if vars_proxy and vars_proxy != 'null':
    # cluster-internal destinations must bypass the proxy
    noproxy = [t.strip()
               for t in vars_proxy['noproxy'].split(',') if t.strip()]
    noproxy += [f'.{tf_vars["domain"]}', vars_net['machine']]
    noproxy += tf_vars['allowed_ranges']
    data.insert(len(data), 'proxy', {
        'httpProxy': vars_proxy['http'],
        'httpsProxy': vars_proxy['https'],
        'noProxy': ','.join(noproxy)
    })
  # inline the pull secret and SSH public key from their files
  for k, v in dict(pull_secret='pullSecret', ssh_key='sshKey').items():
    if k not in paths:
      raise Error(f'Key \'{k}\' missing from fs_paths in Terraform variables.')
    try:
      with paths[k].open() as f:
        data[v] = f.read().strip()
    except (IOError, OSError) as e:
      raise Error(f'Cannot read file: {e}')
  try:
    with (paths['config_dir'] / 'install-config.yaml').open('w') as f:
      y.dump(data, f)
  except (IOError, OSError) as e:
    raise Error(f'Cannot write install config: {e}')
  except yaml.YAMLError as e:
    raise Error(f'Error dumping install-config template: {e}')
@cli.command(help='Generate manifests from install config.')
@click.pass_context
def manifests(ctx=None):
  'Create manifests from install config by running the installer.'
  logging.info('generating manifests')
  cmdline = [
      str(ctx.obj['paths']['openshift_install']),
      'create', 'manifests',
      '--dir', str(ctx.obj['paths']['config_dir'])
  ]
  # subprocess requires string env values; 'credentials' is a pathlib.Path
  # (see _check_convert_paths), so convert it explicitly
  env = {'GOOGLE_CREDENTIALS': str(ctx.obj['paths']['credentials'])}
  _run_installer(cmdline, env)
@cli.command(help='Edit manifests.')
@click.pass_context
def manifests_edit(ctx=None):
  '''Edit generated manifests for Shared VPC / UPI installation.

  Removes master machine manifests (masters are managed by Terraform),
  rewrites worker machinesets for the shared VPC network, updates the cloud
  provider config, and disables scheduling on masters.

  Raises:
    Error: on any file or YAML failure.
  '''
  logging.info('edit manifests')
  # single YAML instance reused for every file; previously this was created
  # inside the machineset loop, so an empty glob left `y` undefined
  # (NameError) for the edits further down
  y = yaml.YAML()
  dir_ = ctx.obj['paths']['config_dir'] / 'openshift'
  # masters are provisioned by the Terraform files in this example
  for fileobj in dir_.glob('99_openshift-cluster-api_master-machines-*.yaml'):
    logging.info(f'removing {fileobj.name}')
    fileobj.unlink()
  tfvars = ctx.obj['tfvars']
  # point worker machinesets at the shared VPC project/subnet and tag them
  for fileobj in dir_.glob('99_openshift-cluster-api_worker-machineset-*.yaml'):
    logging.info(f'editing {fileobj.name}')
    try:
      with fileobj.open() as f:
        data = y.load(f)
      data_v = data['spec']['template']['spec']['providerSpec']['value']
      data_v['region'] = tfvars['region']
      data_v['projectID'] = tfvars['service_project']['project_id']
      if 'ocp-worker' not in data_v['tags']:
        data_v['tags'].append('ocp-worker')
      data_n = data_v['networkInterfaces'][0]
      data_n['network'] = tfvars['host_project']['vpc_name']
      data_n['subnetwork'] = tfvars['host_project']['workers_subnet_name']
      data_n.insert(len(data_n), 'projectID',
                    tfvars['host_project']['project_id'])
      with fileobj.open('w') as f:
        y.dump(data, f)
    except (IOError, OSError, yaml.YAMLError) as e:
      raise Error(f'error editing file {fileobj}: {e}')
  dir_ = ctx.obj['paths']['config_dir'] / 'manifests'
  fileobj = dir_ / 'cloud-provider-config.yaml'
  vars_h = tfvars["host_project"]
  logging.info(f'editing {fileobj.name}')
  try:
    with fileobj.open() as f:
      data = y.load(f)
    # replace any existing network-* settings with the shared VPC values
    config = [
        l for l in data['data']['config'].strip().split('\n')
        if 'network-' not in l.rpartition('=')[0]
    ]
    config += [
        f'network-project-id = {vars_h["project_id"]}',
        f'network-name = {vars_h["vpc_name"]}',
        f'subnetwork-name = {vars_h["default_subnet_name"]}',
    ]
    data['data']['config'] = '\n'.join(config)
    with fileobj.open('w') as f:
      y.dump(data, f)
  except (IOError, OSError, yaml.YAMLError) as e:
    raise Error(f'error editing file {fileobj}: {e}')
  fileobj = dir_ / 'cluster-scheduler-02-config.yml'
  logging.info(f'editing {fileobj.name}')
  try:
    with fileobj.open() as f:
      data = y.load(f)
    # keep application workloads off the control plane
    data['spec']['mastersSchedulable'] = False
    with fileobj.open('w') as f:
      y.dump(data, f)
  except (IOError, OSError, yaml.YAMLError) as e:
    raise Error(f'error editing file {fileobj}: {e}')
# run the click group when invoked as a script
if __name__ == '__main__':
  cli()

View File

@ -0,0 +1,3 @@
click
pyhcl
ruamel.yaml

View File

@ -0,0 +1,31 @@
# OpenShift Cluster Bootstrap
This example is a companion setup to the Python script in the parent folder, and is used to bootstrap OpenShift clusters on GCP. Refer to the documentation in the parent folder for usage instructions.
<!-- BEGIN TFDOC -->
## Variables
| name | description | type | required | default |
|---|---|:---: |:---:|:---:|
| cluster_name | Name used for the cluster and DNS zone. | <code title="">string</code> | ✓ | |
| domain | Domain name used to derive the DNS zone. | <code title="">string</code> | ✓ | |
| fs_paths | Filesystem paths for commands and data, supports home path expansion. | <code title="object&#40;&#123;&#10;credentials &#61; string&#10;config_dir &#61; string&#10;openshift_install &#61; string&#10;pull_secret &#61; string&#10;ssh_key &#61; string&#10;&#125;&#41;">object({...})</code> | ✓ | |
| host_project | Shared VPC project and network configuration. | <code title="object&#40;&#123;&#10;default_subnet_name &#61; string&#10;masters_subnet_name &#61; string&#10;project_id &#61; string&#10;vpc_name &#61; string&#10;workers_subnet_name &#61; string&#10;&#125;&#41;">object({...})</code> | ✓ | |
| service_project | Service project configuration. | <code title="object&#40;&#123;&#10;project_id &#61; string&#10;&#125;&#41;">object({...})</code> | ✓ | |
| *allowed_ranges* | Ranges that can SSH to the bootstrap VM and API endpoint. | <code title="list&#40;any&#41;">list(any)</code> | | <code title="">["10.0.0.0/8"]</code> |
| *disk_encryption_key* | Optional CMEK for disk encryption. | <code title="object&#40;&#123;&#10;keyring &#61; string&#10;location &#61; string&#10;name &#61; string&#10;project_id &#61; string&#10;&#125;&#41;">object({...})</code> | | <code title="">null</code> |
| *install_config_params* | OpenShift cluster configuration. | <code title="object&#40;&#123;&#10;disk_size &#61; number&#10;network &#61; object&#40;&#123;&#10;cluster &#61; string&#10;host_prefix &#61; number&#10;machine &#61; string&#10;service &#61; string&#10;&#125;&#41;&#10;proxy &#61; object&#40;&#123;&#10;http &#61; string&#10;https &#61; string&#10;noproxy &#61; string&#10;&#125;&#41;&#10;&#125;&#41;">object({...})</code> | | <code title="&#123;&#10;disk_size &#61; 16&#10;network &#61; &#123;&#10;cluster &#61; &#34;10.128.0.0&#47;14&#34;&#10;host_prefix &#61; 23&#10;machine &#61; &#34;10.0.0.0&#47;16&#34;&#10;service &#61; &#34;172.30.0.0&#47;16&#34;&#10;&#125;&#10;proxy &#61; null&#10;&#125;">...</code> |
| *post_bootstrap_config* | Name of the service account for the machine operator. Removes bootstrap resources when set. | <code title="object&#40;&#123;&#10;machine_op_sa_prefix &#61; string&#10;&#125;&#41;">object({...})</code> | | <code title="">null</code> |
| *region* | Region where resources will be created. | <code title="">string</code> | | <code title="">europe-west1</code> |
| *rhcos_gcp_image* | RHCOS image used. | <code title="">string</code> | | <code title="">projects/rhcos-cloud/global/images/rhcos-47-83-202102090044-0-gcp-x86-64</code> |
| *tags* | Additional tags for instances. | <code title="list&#40;string&#41;">list(string)</code> | | <code title="">["ssh"]</code> |
| *zones* | Zones used for instances. | <code title="list&#40;string&#41;">list(string)</code> | | <code title="">["b", "c", "d"]</code> |
## Outputs
| name | description | sensitive |
|---|---|:---:|
| backend-health | Command to monitor API internal backend health. | |
| bootstrap-ssh | Command to SSH to the bootstrap instance. | |
| masters-ssh | Command to SSH to the master instances. | |
<!-- END TFDOC -->

View File

@ -0,0 +1,95 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# Bucket holding the bootstrap ignition file, named after the infra id.
resource "google_storage_bucket" "bootstrap-ignition" {
  project       = var.service_project.project_id
  name          = local.infra_id
  location      = var.region
  force_destroy = true
}

# Ignition file, uploaded only while bootstrapping is in progress.
resource "google_storage_bucket_object" "bootstrap-ignition" {
  count  = local.bootstrapping ? 1 : 0
  bucket = google_storage_bucket.bootstrap-ignition.name
  name   = "bootstrap.ign"
  source = "${local.fs_paths.config_dir}/bootstrap.ign"
}

# Signed URL so the bootstrap VM can fetch its ignition file without
# object-level IAM on the bucket.
data "google_storage_object_signed_url" "bootstrap-ignition" {
  count       = local.bootstrapping ? 1 : 0
  bucket      = google_storage_bucket.bootstrap-ignition.name
  path        = google_storage_bucket_object.bootstrap-ignition.0.name
  credentials = file(local.fs_paths.credentials)
}

# Temporary bootstrap instance, destroyed once post_bootstrap_config is set.
resource "google_compute_instance" "bootstrap" {
  count        = local.bootstrapping ? 1 : 0
  project      = var.service_project.project_id
  name         = "${local.infra_id}-b"
  hostname     = "${local.infra_id}-bootstrap.${local.subdomain}"
  machine_type = "n1-standard-4"
  zone         = "${var.region}-${element(var.zones, 0)}"
  network_interface {
    subnetwork         = var.host_project.masters_subnet_name
    subnetwork_project = var.host_project.project_id
  }
  boot_disk {
    initialize_params {
      image = var.rhcos_gcp_image
      size  = 16
      type  = "pd-balanced"
    }
    kms_key_self_link = local.disk_encryption_key
  }
  service_account {
    email  = google_service_account.default["m"].email
    scopes = ["cloud-platform", "userinfo-email"]
  }
  tags = concat(
    [local.tags.bootstrap, local.tags.master, "ocp-master"],
    var.tags == null ? [] : var.tags
  )
  metadata = {
    # RHCOS reads its ignition config from user-data; this config only
    # points at the signed URL generated above
    user-data = jsonencode({
      ignition = {
        config = {
          replace = !local.bootstrapping ? {} : {
            source = data.google_storage_object_signed_url.bootstrap-ignition.0.signed_url
          }
        }
        version = "3.1.0"
      }
    })
    VmDnsSetting = "GlobalDefault"
  }
}

# Unmanaged instance group so the bootstrap node can back the API ILB;
# its instance list is simply empty once bootstrapping is done.
resource "google_compute_instance_group" "bootstrap" {
  project     = var.service_project.project_id
  network     = data.google_compute_network.default.self_link
  zone        = "${var.region}-${var.zones[0]}"
  name        = "${local.infra_id}-bootstrap"
  description = "Openshift bootstrap group for ${local.infra_id}."
  instances   = [for i in google_compute_instance.bootstrap : i.self_link]
  named_port {
    name = "https"
    port = 6443
  }
  named_port {
    name = "ignition"
    port = 22623
  }
}

View File

@ -0,0 +1,38 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# Private zone for the cluster subdomain, visible only from the shared VPC.
resource "google_dns_managed_zone" "internal" {
  project     = var.service_project.project_id
  name        = "${local.infra_id}-private-zone"
  description = "Openshift internal zone for ${local.infra_id}."
  dns_name    = "${local.subdomain}."
  visibility  = "private"
  private_visibility_config {
    networks {
      network_url = data.google_compute_network.default.id
    }
  }
}

# api and api-int both resolve to the internal API load balancer address.
resource "google_dns_record_set" "dns" {
  for_each     = toset(["api", "api-int"])
  project      = var.service_project.project_id
  name         = "${each.key}.${local.subdomain}."
  managed_zone = google_dns_managed_zone.internal.name
  type         = "A"
  ttl          = 60
  rrdatas      = [google_compute_address.api.address]
}

View File

@ -0,0 +1,137 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# SSH from the allowed ranges to the bootstrap node only.
resource "google_compute_firewall" "bootstrap-ssh" {
  name          = "${local.infra_id}-bootstrap-ssh"
  project       = var.host_project.project_id
  network       = var.host_project.vpc_name
  source_ranges = var.allowed_ranges
  target_tags   = [local.tags.bootstrap]
  allow {
    protocol = "tcp"
    ports    = [22]
  }
}

# Cluster API (6443) access from the allowed ranges to the masters.
resource "google_compute_firewall" "api" {
  name          = "${local.infra_id}-api"
  project       = var.host_project.project_id
  network       = var.host_project.vpc_name
  source_ranges = var.allowed_ranges
  target_tags   = [local.tags.master]
  allow {
    protocol = "tcp"
    ports    = [6443]
  }
}

# Health checks for the API load balancer, from Google's documented
# health-check source ranges.
resource "google_compute_firewall" "hc" {
  name    = "${local.infra_id}-hc"
  project = var.host_project.project_id
  network = var.host_project.vpc_name
  source_ranges = [
    "35.191.0.0/16", "130.211.0.0/22", "209.85.152.0/22", "209.85.204.0/22"
  ]
  target_tags = [local.tags.master]
  allow {
    protocol = "tcp"
    ports    = [6080, 6443, 22624]
  }
}

# etcd client (2379) and peer (2380) traffic between masters.
resource "google_compute_firewall" "etcd" {
  name        = "${local.infra_id}-etcd"
  project     = var.host_project.project_id
  network     = var.host_project.vpc_name
  source_tags = [local.tags.master]
  target_tags = [local.tags.master]
  allow {
    protocol = "tcp"
    ports    = [2379, 2380]
  }
}

# Control-plane services (controller-manager, scheduler, machine-config
# server) from cluster nodes to masters.
resource "google_compute_firewall" "ctrl-plane" {
  name        = "${local.infra_id}-ctrl-plane"
  project     = var.host_project.project_id
  network     = var.host_project.vpc_name
  source_tags = [local.tags.master, local.tags.worker]
  target_tags = [local.tags.master]
  allow {
    protocol = "tcp"
    ports    = [10257, 10259, 22623]
  }
}

# ICMP and SSH from the machine network to all cluster nodes.
resource "google_compute_firewall" "internal-net" {
  name          = "${local.infra_id}-internal-net"
  project       = var.host_project.project_id
  network       = var.host_project.vpc_name
  source_ranges = [var.install_config_params.network.machine]
  target_tags   = [local.tags.master, local.tags.worker]
  allow {
    protocol = "icmp"
  }
  allow {
    protocol = "tcp"
    ports    = [22]
  }
}

# Node-to-node cluster traffic: overlay/IPsec (esp, udp 500/4500/4789/6081),
# host-level services (9000-9999), kubelet (10250) and NodePort range.
resource "google_compute_firewall" "internal-cluster" {
  name        = "${local.infra_id}-internal-cluster"
  project     = var.host_project.project_id
  network     = var.host_project.vpc_name
  source_tags = [local.tags.master, local.tags.worker]
  target_tags = [local.tags.master, local.tags.worker]
  allow {
    protocol = "esp"
  }
  allow {
    protocol = "tcp"
    ports    = ["9000-9999", 10250, "30000-32767"]
  }
  allow {
    protocol = "udp"
    ports    = [500, 4500, 4789, 6081, "9000-9999", "30000-32767"]
  }
}

# Health checks for application load balancing against the NodePort range.
resource "google_compute_firewall" "apps-hc" {
  name    = "${local.infra_id}-apps-hc"
  project = var.host_project.project_id
  network = var.host_project.vpc_name
  source_ranges = [
    "35.191.0.0/16", "130.211.0.0/22", "209.85.152.0/22", "209.85.204.0/22"
  ]
  target_tags = [local.tags.worker]
  allow {
    protocol = "tcp"
    ports    = ["30000-32767"]
  }
}

# HTTP/HTTPS application ingress from the allowed ranges to the workers.
resource "google_compute_firewall" "apps" {
  name          = "${local.infra_id}-apps"
  project       = var.host_project.project_id
  network       = var.host_project.vpc_name
  source_ranges = var.allowed_ranges
  target_tags   = [local.tags.worker]
  allow {
    protocol = "tcp"
    ports    = [80, 443]
  }
}

View File

@ -0,0 +1,83 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
locals {
  # roles granted to both master and worker service accounts
  minimal_sa_roles = [
    "roles/logging.logWriter",
    "roles/monitoring.metricWriter"
  ]
}

# One service account per machine role: m = master, w = worker.
resource "google_service_account" "default" {
  for_each     = { m = "master", w = "worker" }
  project      = var.service_project.project_id
  account_id   = "${local.infra_id}-${each.key}"
  display_name = "Openshift ${each.value} for ${local.infra_id}."
}

# Host (shared VPC) project roles for the master service account.
# https://docs.openshift.com/container-platform/4.7/installing/installing_gcp/installing-gcp-user-infra-vpc.html#installation-creating-gcp-iam-shared-vpc_installing-gcp-user-infra-vpc
resource "google_project_iam_member" "host-master" {
  for_each = toset([
    "roles/compute.networkUser",
    "roles/compute.networkViewer"
  ])
  project = var.host_project.project_id
  role    = each.key
  member  = "serviceAccount:${google_service_account.default["m"].email}"
}

# Host (shared VPC) project roles for the worker service account.
resource "google_project_iam_member" "host-worker" {
  for_each = toset([
    "roles/compute.networkUser"
  ])
  project = var.host_project.project_id
  role    = each.key
  member  = "serviceAccount:${google_service_account.default["w"].email}"
}

# This on the other hand seems excessive
# https://docs.openshift.com/container-platform/4.7/installing/installing_gcp/installing-restricted-networks-gcp.html#installation-creating-gcp-iam-shared-vpc_installing-restricted-networks-gcp
resource "google_project_iam_member" "service-master" {
  for_each = toset(concat(local.minimal_sa_roles, [
    "roles/compute.instanceAdmin",
    "roles/compute.networkAdmin",
    "roles/compute.securityAdmin",
    "roles/iam.serviceAccountUser",
    "roles/storage.admin"
  ]))
  project = var.service_project.project_id
  role    = each.key
  member  = "serviceAccount:${google_service_account.default["m"].email}"
}

# Service project roles for the worker service account.
resource "google_project_iam_member" "service-worker" {
  for_each = toset(concat(local.minimal_sa_roles, [
    "roles/compute.viewer",
    "roles/storage.admin"
  ]))
  project = var.service_project.project_id
  role    = each.key
  member  = "serviceAccount:${google_service_account.default["w"].email}"
}

# Grants the machine operator SA network access once post-bootstrap config
# provides its prefix (see the oc command in variables.tf).
resource "google_project_iam_member" "machineset-operator" {
  count   = local.machine_sa == null ? 0 : 1
  project = var.host_project.project_id
  role    = "roles/compute.networkUser"
  member  = "serviceAccount:${local.machine_sa}@${var.service_project.project_id}.iam.gserviceaccount.com"
}

View File

@ -0,0 +1,68 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# Reserved internal address for the cluster API endpoint.
resource "google_compute_address" "api" {
  project      = var.service_project.project_id
  name         = "${local.infra_id}-ilb"
  address_type = "INTERNAL"
  region       = var.region
  subnetwork   = data.google_compute_subnetwork.default["default"].self_link
}

# HTTPS health check against the kube-apiserver readiness endpoint.
resource "google_compute_health_check" "api" {
  project = var.service_project.project_id
  name    = "${local.infra_id}-api"
  https_health_check {
    port         = 6443
    request_path = "/readyz"
  }
}

# Internal TCP backend service: master groups always, the bootstrap group
# only while bootstrapping is in progress.
resource "google_compute_region_backend_service" "api" {
  project               = var.service_project.project_id
  name                  = "${local.infra_id}-api"
  load_balancing_scheme = "INTERNAL"
  region                = var.region
  network               = data.google_compute_network.default.self_link
  health_checks         = [google_compute_health_check.api.self_link]
  protocol              = "TCP"
  dynamic "backend" {
    for_each = google_compute_instance_group.master
    content {
      group = backend.value.self_link
    }
  }
  dynamic "backend" {
    for_each = toset(local.bootstrapping ? [""] : [])
    content {
      group = google_compute_instance_group.bootstrap.self_link
    }
  }
}

# Forwarding rule exposing the API (6443) and machine-config server (22623);
# global access lets clients in any region of the VPC reach it.
resource "google_compute_forwarding_rule" "api" {
  project               = var.service_project.project_id
  name                  = "${local.infra_id}-api"
  load_balancing_scheme = "INTERNAL"
  region                = var.region
  network               = data.google_compute_network.default.self_link
  subnetwork            = data.google_compute_subnetwork.default["default"].self_link
  ip_address            = google_compute_address.api.address
  ip_protocol           = "TCP"
  ports                 = [6443, 22623]
  allow_global_access   = true
  backend_service       = google_compute_region_backend_service.api.self_link
}

View File

@ -0,0 +1,61 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
locals {
  # bootstrap resources exist only until post_bootstrap_config is set
  bootstrapping = var.post_bootstrap_config == null
  cluster_name  = local.install_metadata["clusterName"]
  # fully qualified CMEK key id, or null when CMEK is disabled
  disk_encryption_key = (
    var.disk_encryption_key == null
    ? null
    : data.google_kms_crypto_key.default.0.id
  )
  # expand ~ in user-supplied filesystem paths
  fs_paths = { for k, v in var.fs_paths : k => pathexpand(v) }
  # infra id generated by the OpenShift installer in metadata.json
  infra_id = local.install_metadata["infraID"]
  install_metadata = jsondecode(file(
    "${local.fs_paths.config_dir}/metadata.json"
  ))
  machine_sa = try(var.post_bootstrap_config.machine_op_sa_prefix, null)
  # NOTE(review): post_bootstrap_config declares no router_address attribute
  # in variables.tf, so this always evaluates to null and looks unused —
  # confirm before removing
  router_address = try(var.post_bootstrap_config.router_address, null)
  subdomain      = "${var.cluster_name}.${var.domain}"
  # per-role network tags derived from the infra id
  tags = {
    for n in ["bootstrap", "master", "worker"] : n => "${local.infra_id}-${n}"
  }
}

# Shared VPC network from the host project.
data "google_compute_network" "default" {
  project = var.host_project.project_id
  name    = var.host_project.vpc_name
}

# Default, masters and workers subnets from the host project.
data "google_compute_subnetwork" "default" {
  for_each = toset(["default", "masters", "workers"])
  project  = var.host_project.project_id
  region   = var.region
  name     = var.host_project["${each.key}_subnet_name"]
}

# Optional CMEK lookups, only when disk encryption is requested.
data "google_kms_key_ring" "default" {
  count    = var.disk_encryption_key == null ? 0 : 1
  project  = var.disk_encryption_key.project_id
  location = var.disk_encryption_key.location
  name     = var.disk_encryption_key.keyring
}

data "google_kms_crypto_key" "default" {
  count    = var.disk_encryption_key == null ? 0 : 1
  key_ring = data.google_kms_key_ring.default.0.self_link
  name     = var.disk_encryption_key.name
}

View File

@ -0,0 +1,66 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# One master instance per configured zone, booted from the RHCOS image with
# the master ignition file passed inline via instance metadata.
resource "google_compute_instance" "master" {
  for_each     = toset(var.zones)
  project      = var.service_project.project_id
  name         = "${local.infra_id}-master-${each.key}"
  hostname     = "${local.infra_id}-master-${each.key}.${local.subdomain}"
  machine_type = "n1-standard-4"
  zone         = "${var.region}-${each.key}"
  network_interface {
    subnetwork         = var.host_project.masters_subnet_name
    subnetwork_project = var.host_project.project_id
  }
  boot_disk {
    initialize_params {
      image = var.rhcos_gcp_image
      size  = var.install_config_params.disk_size
      type  = "pd-ssd"
    }
    kms_key_self_link = local.disk_encryption_key
  }
  service_account {
    email  = google_service_account.default["m"].email
    scopes = ["cloud-platform", "userinfo-email"]
  }
  tags = concat(
    [local.tags.master, "ocp-master"],
    var.tags == null ? [] : var.tags
  )
  metadata = {
    user-data    = file("${local.fs_paths.config_dir}/master.ign"),
    VmDnsSetting = "GlobalDefault"
  }
}

# Per-zone unmanaged instance groups backing the internal API load balancer.
resource "google_compute_instance_group" "master" {
  for_each    = toset(var.zones)
  project     = var.service_project.project_id
  network     = data.google_compute_network.default.self_link
  zone        = "${var.region}-${each.key}"
  name        = "${local.infra_id}-master-${each.key}"
  description = "Openshift master group for ${local.infra_id} in zone ${each.key}."
  instances   = [google_compute_instance.master[each.key].self_link]
  named_port {
    name = "https"
    port = 6443
  }
  named_port {
    name = "ignition"
    port = 22623
  }
}

View File

@ -0,0 +1,47 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# Ready-made gcloud command to watch API backend health during bootstrap.
output "backend-health" {
  description = "Command to monitor API internal backend health."
  value       = <<END
gcloud compute backend-services get-health ${google_compute_region_backend_service.api.name} \
  --project ${google_compute_region_backend_service.api.project} \
  --region ${google_compute_region_backend_service.api.region} \
  --format 'value(backend, status.healthStatus.healthState)'
END
}

# SSH command for the bootstrap node; null once bootstrapping is complete.
# Assumes the configured ssh_key path points at the `.pub` public key.
output "bootstrap-ssh" {
  description = "Command to SSH to the bootstrap instance."
  value       = !local.bootstrapping ? null : <<END
gcloud compute ssh core@${google_compute_instance.bootstrap.0.name} \
  --project ${google_compute_instance.bootstrap.0.project} \
  --zone ${google_compute_instance.bootstrap.0.zone} \
  --ssh-key-file ${replace(var.fs_paths.ssh_key, ".pub", "")}
END
}

# One SSH command per master, keyed by zone suffix.
output "masters-ssh" {
  description = "Command to SSH to the master instances."
  value = {
    for k, v in google_compute_instance.master : k => <<END
gcloud compute ssh core@${v.name} \
  --project ${v.project} \
  --zone ${v.zone} \
  --ssh-key-file ${replace(var.fs_paths.ssh_key, ".pub", "")}
END
  }
}

View File

@ -0,0 +1,26 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# pinning to avoid some weird issues we had with the following version
# pinning to avoid some weird issues we had with the following version
# NOTE(review): this is an exact provider pin; relax to a pessimistic
# constraint once the issue above is confirmed fixed
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "3.65.0"
    }
  }
}

View File

@ -0,0 +1,137 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# Fixes "boostrap" typo in the description shown in generated docs.
variable "allowed_ranges" {
  description = "Ranges that can SSH to the bootstrap VM and API endpoint."
  type        = list(any)
  default     = ["10.0.0.0/8"]
}
variable "cluster_name" {
  description = "Name used for the cluster and DNS zone."
  type        = string
}

variable "domain" {
  description = "Domain name used to derive the DNS zone."
  type        = string
}

# When null, CMEK is disabled and boot disks use Google-managed encryption.
variable "disk_encryption_key" {
  description = "Optional CMEK for disk encryption."
  type = object({
    keyring    = string
    location   = string
    name       = string
    project_id = string
  })
  default = null
}

variable "host_project" {
  description = "Shared VPC project and network configuration."
  type = object({
    default_subnet_name = string
    masters_subnet_name = string
    project_id          = string
    vpc_name            = string
    workers_subnet_name = string
  })
}

# https://github.com/openshift/installer/blob/master/docs/user/customization.md
variable "install_config_params" {
  description = "OpenShift cluster configuration."
  type = object({
    disk_size = number
    network = object({
      cluster     = string
      host_prefix = number
      machine     = string
      service     = string
    })
    proxy = object({
      http    = string
      https   = string
      noproxy = string
    })
  })
  default = {
    disk_size = 16
    network = {
      cluster     = "10.128.0.0/14"
      host_prefix = 23
      machine     = "10.0.0.0/16"
      service     = "172.30.0.0/16"
    }
    proxy = null
  }
}

variable "fs_paths" {
  description = "Filesystem paths for commands and data, supports home path expansion."
  type = object({
    credentials       = string
    config_dir        = string
    openshift_install = string
    pull_secret       = string
    ssh_key           = string
  })
}

# oc -n openshift-cloud-credential-operator get CredentialsRequest \
#   openshift-machine-api-gcp \
#   -o jsonpath='{.status.providerStatus.serviceAccountID}{"\n"}'
variable "post_bootstrap_config" {
  description = "Name of the service account for the machine operator. Removes bootstrap resources when set."
  type = object({
    machine_op_sa_prefix = string
  })
  default = null
}

variable "region" {
  description = "Region where resources will be created."
  type        = string
  default     = "europe-west1"
}

variable "rhcos_gcp_image" {
  description = "RHCOS image used."
  type        = string
  default     = "projects/rhcos-cloud/global/images/rhcos-47-83-202102090044-0-gcp-x86-64"
}

variable "service_project" {
  description = "Service project configuration."
  type = object({
    project_id = string
  })
}

variable "tags" {
  description = "Additional tags for instances."
  type        = list(string)
  default     = ["ssh"]
}

# Zone suffixes appended to var.region (e.g. europe-west1-b).
variable "zones" {
  description = "Zones used for instances."
  type        = list(string)
  default     = ["b", "c", "d"]
}