diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2a903525..3fc3fe56 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -33,7 +33,7 @@ env: TF_VERSION: 1.3.2 jobs: - doc-examples: + examples: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -68,7 +68,7 @@ jobs: pip install -r tests/requirements.txt pytest -vv tests/examples - examples: + blueprints: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f751674..65841252 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,12 @@ All notable changes to this project will be documented in this file. ### BLUEPRINTS +- [[#924](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/924)] Fix formatting for gcloud dataflow job launch command ([aymanfarhat](https://github.com/aymanfarhat)) +- [[#921](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/921)] Align documentation, move glb blueprint ([ludoo](https://github.com/ludoo)) +- [[#915](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/915)] TFE OIDC with GCP WIF blueprint added ([averbuks](https://github.com/averbuks)) +- [[#899](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/899)] Static routes monitoring metrics added to network dashboard BP ([maunope](https://github.com/maunope)) +- [[#909](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/909)] GCS2BQ: Move images and templates in sub-folders ([lcaggio](https://github.com/lcaggio)) +- [[#907](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/907)] Fix CloudSQL blueprint ([lcaggio](https://github.com/lcaggio)) - [[#897](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/897)] Project-factory: allow folder_id to be defined in defaults_file ([Malet](https://github.com/Malet)) - [[#900](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/900)] Improve net 
dashboard variables ([juliocc](https://github.com/juliocc)) - [[#896](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/896)] Network Dashboard: CFv2 and performance improvements ([aurelienlegrand](https://github.com/aurelienlegrand)) @@ -37,6 +43,7 @@ All notable changes to this project will be documented in this file. ### DOCUMENTATION +- [[#921](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/921)] Align documentation, move glb blueprint ([ludoo](https://github.com/ludoo)) - [[#898](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/898)] Update FAST bootstrap README.md ([juliocc](https://github.com/juliocc)) - [[#878](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/878)] chore: update cft and fabric ([bharathkkb](https://github.com/bharathkkb)) - [[#863](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/863)] Fabric vs CFT doc ([ludoo](https://github.com/ludoo)) @@ -44,6 +51,7 @@ All notable changes to this project will be documented in this file. ### FAST +- [[#911](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/911)] FAST: Additional PGA DNS records ([sruffilli](https://github.com/sruffilli)) - [[#903](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/903)] Initial replacement for CI/CD stage ([ludoo](https://github.com/ludoo)) - [[#898](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/898)] Update FAST bootstrap README.md ([juliocc](https://github.com/juliocc)) - [[#880](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/880)] **incompatible change:** Refactor net-vpc module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) @@ -63,6 +71,14 @@ All notable changes to this project will be documented in this file. 
### MODULES +- [[#926](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/926)] Fix backwards compatibility for vpc subnet descriptions ([ludoo](https://github.com/ludoo)) +- [[#927](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/927)] Add support for deployment type and api proxy type for Apigee org ([kmucha555](https://github.com/kmucha555)) +- [[#923](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/923)] Fix service account creation error in gke nodepool module ([ludoo](https://github.com/ludoo)) +- [[#908](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/908)] GKE module: autopilot fixes ([ludoo](https://github.com/ludoo)) +- [[#906](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/906)] GKE module: add managed_prometheus to features ([apichick](https://github.com/apichick)) +- [[#916](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/916)] Add support for DNS routing policies ([juliocc](https://github.com/juliocc)) +- [[#918](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/918)] Fix race condition in SimpleNVA ([sruffilli](https://github.com/sruffilli)) +- [[#914](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/914)] **incompatible change:** Update DNS module ([juliocc](https://github.com/juliocc)) - [[#904](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/904)] Add missing description field ([dsbutler101](https://github.com/dsbutler101)) - [[#891](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/891)] Add internal_ips output to compute-vm module ([LucaPrete](https://github.com/LucaPrete)) - [[#890](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/890)] Add auto_delete and instance_redistribution_type to compute-vm and compute-mig modules. 
([giovannibaratta](https://github.com/giovannibaratta)) @@ -95,6 +111,7 @@ All notable changes to this project will be documented in this file. ### TOOLS +- [[#919](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/919)] Rename workflow names ([juliocc](https://github.com/juliocc)) - [[#902](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/902)] Bring back sorted variables check ([juliocc](https://github.com/juliocc)) - [[#887](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/887)] Disable parallel execution of tests and plugin cache ([ludoo](https://github.com/ludoo)) - [[#886](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/886)] Revert "Improve handling of tf plugin cache in tests" ([ludoo](https://github.com/ludoo)) diff --git a/README.md b/README.md index 70d5d666..6aa292d7 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ This repository provides **end-to-end blueprints** and a **suite of Terraform mo - organization-wide [landing zone blueprint](fast/) used to bootstrap real-world cloud foundations - reference [blueprints](./blueprints/) used to deep dive on network patterns or product features -- a comprehensive source of lean [modules](./modules/dns) that lend themselves well to changes +- a comprehensive source of lean [modules](./modules/) that lend themselves well to changes The whole repository is meant to be cloned as a single unit, and then forked into separate owned repositories to seed production usage, or used as-is and periodically updated as a complete toolkit for prototyping. You can read more on this approach in our [contributing guide](./CONTRIBUTING.md), and a comparison against similar toolkits [here](./FABRIC-AND-CFT.md). 
@@ -29,16 +29,16 @@ The current list of modules supports most of the core foundational and networkin Currently available modules: -- **foundational** - [folder](./modules/folder), [organization](./modules/organization), [project](./modules/project), [service accounts](./modules/iam-service-account), [logging bucket](./modules/logging-bucket), [billing budget](./modules/billing-budget), [projects-data-source](./modules/projects-data-source), [organization-policy](./modules/organization-policy) -- **networking** - [VPC](./modules/net-vpc), [VPC firewall](./modules/net-vpc-firewall), [VPC peering](./modules/net-vpc-peering), [VPN static](./modules/net-vpn-static), [VPN dynamic](./modules/net-vpn-dynamic), [HA VPN](./modules/net-vpn-ha), [NAT](./modules/net-cloudnat), [address reservation](./modules/net-address), [DNS](./modules/dns), [L4 ILB](./modules/net-ilb), [L7 ILB](./modules/net-ilb-l7), [Service Directory](./modules/service-directory), [Cloud Endpoints](./modules/endpoints) -- **compute** - [VM/VM group](./modules/compute-vm), [MIG](./modules/compute-mig), [GKE cluster](./modules/gke-cluster), [GKE nodepool](./modules/gke-nodepool), [GKE hub](./modules/gke-hub), [COS container](./modules/cloud-config-container/cos-generic-metadata/) (coredns, mysql, onprem, squid) -- **data** - [GCS](./modules/gcs), [BigQuery dataset](./modules/bigquery-dataset), [Pub/Sub](./modules/pubsub), [Datafusion](./modules/datafusion), [Bigtable instance](./modules/bigtable-instance), [Cloud SQL instance](./modules/cloudsql-instance), [Data Catalog Policy Tag](./modules/data-catalog-policy-tag) -- **development** - [Cloud Source Repository](./modules/source-repository), [Container Registry](./modules/container-registry), [Artifact Registry](./modules/artifact-registry), [Apigee Organization](./modules/apigee-organization), [Apigee X Instance](./modules/apigee-x-instance), [API Gateway](./modules/api-gateway) -- **security** - [KMS](./modules/kms), 
[SecretManager](./modules/secret-manager), [VPC Service Control](./modules/vpc-sc) +- **foundational** - [billing budget](./modules/billing-budget), [Cloud Identity group](./modules/cloud-identity-group/), [folder](./modules/folder), [service accounts](./modules/iam-service-account), [logging bucket](./modules/logging-bucket), [organization](./modules/organization), [organization-policy](./modules/organization-policy), [project](./modules/project), [projects-data-source](./modules/projects-data-source) +- **networking** - [DNS](./modules/dns), [Cloud Endpoints](./modules/endpoints), [address reservation](./modules/net-address), [NAT](./modules/net-cloudnat), [Global Load Balancer (classic)](./modules/net-glb/), [L4 ILB](./modules/net-ilb), [L7 ILB](./modules/net-ilb-l7), [VPC](./modules/net-vpc), [VPC firewall](./modules/net-vpc-firewall), [VPC peering](./modules/net-vpc-peering), [VPN dynamic](./modules/net-vpn-dynamic), [HA VPN](./modules/net-vpn-ha), [VPN static](./modules/net-vpn-static), [Service Directory](./modules/service-directory) +- **compute** - [VM/VM group](./modules/compute-vm), [MIG](./modules/compute-mig), [COS container](./modules/cloud-config-container/cos-generic-metadata/) (coredns, mysql, onprem, squid), [GKE cluster](./modules/gke-cluster), [GKE hub](./modules/gke-hub), [GKE nodepool](./modules/gke-nodepool) +- **data** - [BigQuery dataset](./modules/bigquery-dataset), [Bigtable instance](./modules/bigtable-instance), [Cloud SQL instance](./modules/cloudsql-instance), [Data Catalog Policy Tag](./modules/data-catalog-policy-tag), [Datafusion](./modules/datafusion), [GCS](./modules/gcs), [Pub/Sub](./modules/pubsub) +- **development** - [API Gateway](./modules/api-gateway), [Apigee Organization](./modules/apigee-organization), [Apigee X Instance](./modules/apigee-x-instance), [Artifact Registry](./modules/artifact-registry), [Container Registry](./modules/container-registry), [Cloud Source Repository](./modules/source-repository) +- **security** 
- [Binauthz](./modules/binauthz/), [KMS](./modules/kms), [SecretManager](./modules/secret-manager), [VPC Service Control](./modules/vpc-sc) - **serverless** - [Cloud Function](./modules/cloud-function), [Cloud Run](./modules/cloud-run) For more information and usage examples see each module's README file. ## End-to-end blueprints -The [blueprints](./blueprints/) in this repository are split in several main sections: **[networking blueprints](./blueprints/networking/)** that implement core patterns or features, **[data solutions blueprints](./blueprints/data-solutions/)** that demonstrate how to integrate data services in complete scenarios, **[cloud operations blueprints](./blueprints/cloud-operations/)** that leverage specific products to meet specific operational needs and **[factories](./blueprints/factories/)** that implement resource factories for the repetitive creation of specific resources, and finally **[GKE](./blueprints/gke)** and **[serverless](./blueprints/serverless)** design blueprints. +The [blueprints](./blueprints/) in this repository are split in several main sections: **[networking blueprints](./blueprints/networking/)** that implement core patterns or features, **[data solutions blueprints](./blueprints/data-solutions/)** that demonstrate how to integrate data services in complete scenarios, **[cloud operations blueprints](./blueprints/cloud-operations/)** that leverage specific products to meet specific operational needs and **[factories](./blueprints/factories/)** that implement resource factories for the repetitive creation of specific resources, and finally **[GKE](./blueprints/gke)**, **[serverless](./blueprints/serverless)**, and **[third-party solutions](./blueprints/third-party-solutions/)** design blueprints. 
diff --git a/blueprints/cloud-operations/glb_and_armor/shell_button.png b/assets/images/cloud-shell-button.png similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/shell_button.png rename to assets/images/cloud-shell-button.png diff --git a/blueprints/README.md b/blueprints/README.md index aad7cb08..77e13906 100644 --- a/blueprints/README.md +++ b/blueprints/README.md @@ -4,12 +4,12 @@ This section **[networking blueprints](./networking/)** that implement core patt Currently available blueprints: -- **cloud operations** - [Resource tracking and remediation via Cloud Asset feeds](./cloud-operations/asset-inventory-feed-remediation), [Granular Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Granular Cloud DNS IAM for Shared VPC](./cloud-operations/dns-shared-vpc), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq), [Packer image builder](./cloud-operations/packer-image-builder), [On-prem SA key management](./cloud-operations/onprem-sa-key-management), [TCP healthcheck for unmanaged GCE instances](./cloud-operations/unmanaged-instances-healthcheck), [HTTP Load Balancer with Cloud Armor](./cloud-operations/glb_and_armor) -- **data solutions** - [GCE/GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms/), [Cloud Storage to Bigquery with Cloud Dataflow with least privileges](./data-solutions/gcs-to-bq-with-least-privileges/), [Data Platform Foundations](./data-solutions/data-platform-foundations/), [SQL Server AlwaysOn availability groups blueprint](./data-solutions/sqlserver-alwayson), [Cloud SQL instance with multi-region read replicas](./data-solutions/cloudsql-multiregion/), [Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key](./data-solutions/composer-2/) -- **factories** - [The why and the how of resource 
factories](./factories/README.md) -- **GKE** - [GKE multitenant fleet](./gke/multitenant-fleet/), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [Binary Authorization Pipeline](./gke/binauthz/), [Multi-cluster mesh on GKE (fleet API)](./gke/multi-cluster-mesh-gke-fleet-api/) -- **networking** - [hub and spoke via peering](./networking/hub-and-spoke-peering/), [hub and spoke via VPN](./networking/hub-and-spoke-vpn/), [DNS and Google Private Access for on-premises](./networking/onprem-google-access-dns/), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [ILB as next hop](./networking/ilb-next-hop), [Connecting to on-premise services leveraging PSC and hybrid NEGs](./networking/psc-hybrid/), [decentralized firewall](./networking/decentralized-firewall) -- **serverless** - [Multi-region deployments for API Gateway](./serverless/api-gateway/) -- **third party solutions** - [OpenShift cluster on Shared VPC](./third-party-solutions/openshift) +- **cloud operations** - [Active Directory Federation Services](./cloud-operations/adfs), [Cloud Asset Inventory feeds for resource change tracking and remediation](./cloud-operations/asset-inventory-feed-remediation), [Fine-grained Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Cloud DNS & Shared VPC design](./cloud-operations/dns-shared-vpc), [Delegated Role Grants](./cloud-operations/iam-delegated-role-grants), [Networking Dashboard](./cloud-operations/network-dashboard), [Managing on-prem service account keys by uploading public keys](./cloud-operations/onprem-sa-key-management), [Compute Image builder with Hashicorp Packer](./cloud-operations/packer-image-builder), [Packer example](./cloud-operations/packer-image-builder/packer), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq), [Configuring workload identity federation for Terraform 
Cloud/Enterprise workflow](./cloud-operations/terraform-enterprise-wif), [TCP healthcheck and restart for unmanaged GCE instances](./cloud-operations/unmanaged-instances-healthcheck), [Migrate for Compute Engine (v5) blueprints](./cloud-operations/vm-migration), [Configuring workload identity federation to access Google Cloud resources from apps running on Azure](./cloud-operations/workload-identity-federation) +- **data solutions** - [GCE and GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms), [Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key](./data-solutions/composer-2), [Cloud SQL instance with multi-region read replicas](./data-solutions/cloudsql-multiregion), [Data Platform](./data-solutions/data-platform-foundations), [Spinning up a foundation data pipeline on Google Cloud using Cloud Storage, Dataflow and BigQuery](./data-solutions/gcs-to-bq-with-least-privileges), [SQL Server Always On Groups blueprint](./data-solutions/sqlserver-alwayson), [Data Playground](./data-solutions/data-playground) +- **factories** - [The why and the how of Resource Factories](./factories), [Google Cloud Identity Group Factory](./factories/cloud-identity-group-factory), [Google Cloud BQ Factory](./factories/bigquery-factory), [Google Cloud VPC Firewall Factory](./factories/net-vpc-firewall-yaml), [Minimal Project Factory](./factories/project-factory) +- **GKE** - [Binary Authorization Pipeline Blueprint](./gke/binauthz), [Storage API](./gke/binauthz/image), [Multi-cluster mesh on GKE (fleet API)](./gke/multi-cluster-mesh-gke-fleet-api), [GKE Multitenant Blueprint](./gke/multitenant-fleet), [Shared VPC with GKE support](./networking/shared-vpc-gke/) +- **networking** - [Decentralized firewall management](./networking/decentralized-firewall), [Decentralized firewall validator](./networking/decentralized-firewall/validator), [Network filtering with Squid](./networking/filtering-proxy), [HTTP Load Balancer with Cloud 
Armor](./networking/glb-and-armor), [Hub and Spoke via VPN](./networking/hub-and-spoke-vpn), [Hub and Spoke via VPC Peering](./networking/hub-and-spoke-peering), [Internal Load Balancer as Next Hop](./networking/ilb-next-hop), [Nginx-based reverse proxy cluster](./networking/nginx-reverse-proxy-cluster), [On-prem DNS and Google Private Access](./networking/onprem-google-access-dns), [Calling a private Cloud Function from On-premises](./networking/private-cloud-function-from-onprem), [Hybrid connectivity to on-premise services through PSC](./networking/psc-hybrid), [PSC Producer](./networking/psc-hybrid/psc-producer), [PSC Consumer](./networking/psc-hybrid/psc-consumer), [Shared VPC with optional GKE cluster](./networking/shared-vpc-gke) +- **serverless** - [Creating multi-region deployments for API Gateway](./serverless/api-gateway) +- **third party solutions** - [OpenShift on GCP user-provisioned infrastructure](./third-party-solutions/openshift), [Wordpress deployment on Cloud Run](./third-party-solutions/wordpress/cloudrun) For more information see the individual README files in each section. diff --git a/blueprints/cloud-operations/README.md b/blueprints/cloud-operations/README.md index 36e4c41b..863aee58 100644 --- a/blueprints/cloud-operations/README.md +++ b/blueprints/cloud-operations/README.md @@ -2,6 +2,12 @@ The blueprints in this folder show how to wire together different Google Cloud services to simplify operations, and are meant for testing, or as minimal but sufficiently complete starting points for actual use. +## Active Directory Federation Services + + This [blueprint](./adfs/) sets up managed AD, creates a server where AD FS will be installed which will also act as admin workstation for AD, and exposes ADFS using GLB. It can also optionally set up a GCP project and VPC if needed. + +
+ ## Resource tracking and remediation via Cloud Asset feeds This [blueprint](./asset-inventory-feed-remediation) shows how to leverage [Cloud Asset Inventory feeds](https://cloud.google.com/asset-inventory/docs/monitoring-asset-changes) to stream resource changes in real time, and how to programmatically use the feed change notifications for alerting or remediation, via a Cloud Function wired to the feed PubSub queue. @@ -10,12 +16,6 @@ The blueprint's feed tracks changes to Google Compute instances, and the Cloud F
-## Scheduled Cloud Asset Inventory Export to Bigquery - - This [blueprint](./scheduled-asset-inventory-export-bq) shows how to leverage the [Cloud Asset Inventory Exporting to Bigquery](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery) feature, to keep track of your organization's assets over time storing information in Bigquery. Data stored in Bigquery can then be used for different purposes like dashboarding or analysis. - -
- ## Granular Cloud DNS IAM via Service Directory This [blueprint](./dns-fine-grained-iam) shows how to leverage [Service Directory](https://cloud.google.com/blog/products/networking/introducing-service-directory) and Cloud DNS Service Directory private zones, to implement fine-grained IAM controls on DNS. The blueprint creates a Service Directory namespace, a Cloud DNS private zone that uses it as its authoritative source, service accounts with different levels of permissions, and VMs to test them. @@ -28,37 +28,62 @@ The blueprint's feed tracks changes to Google Compute instances, and the Cloud F
-## Compute Engine quota monitoring - - This [blueprint](./quota-monitoring) shows a practical way of collecting and monitoring [Compute Engine resource quotas](https://cloud.google.com/compute/quotas) via Cloud Monitoring metrics as an alternative to the recently released [built-in quota metrics](https://cloud.google.com/monitoring/alerts/using-quota-metrics). A simple alert on quota thresholds is also part of the blueprint. - -
- ## Delegated Role Grants This [blueprint](./iam-delegated-role-grants) shows how to use delegated role grants to restrict service usage.
+## Network Dashboard + + This [blueprint](./network-dashboard/) provides an end-to-end solution to gather some GCP Networking quotas and limits (that cannot be seen in the GCP console today) and display them in a dashboard. The goal is to allow for better visibility of these limits, facilitating capacity planning and avoiding hitting these limits. + +
+ +## On-prem Service Account key management + +This [blueprint](./onprem-sa-key-management) shows how to manage IAM Service Account Keys by manually generating a key pair and uploading the public part of the key to GCP. + +
+ ## Packer image builder This [blueprint](./packer-image-builder) shows how to deploy infrastructure for a Compute Engine image builder based on [Hashicorp's Packer tool](https://www.packer.io).
-## On-prem Service Account key management +## Compute Engine quota monitoring - -This [blueprint](./onprem-sa-key-management) shows how to manage IAM Service Account Keys by manually generating a key pair and uploading the public part of the key to GCP. + This [blueprint](./quota-monitoring) shows a practical way of collecting and monitoring [Compute Engine resource quotas](https://cloud.google.com/compute/quotas) via Cloud Monitoring metrics as an alternative to the recently released [built-in quota metrics](https://cloud.google.com/monitoring/alerts/using-quota-metrics). A simple alert on quota thresholds is also part of the blueprint.
-## Migrate for Compute Engine (v5) - This set of [blueprints](./vm-migration) shows how to deploy Migrate for Compute Engine (v5) on top of existing Cloud Foundations on different scenarios. An blueprint on how to deploy the M4CE connector on VMWare ESXi is also part of the blueprints. +## Scheduled Cloud Asset Inventory Export to Bigquery + + This [blueprint](./scheduled-asset-inventory-export-bq) shows how to leverage the [Cloud Asset Inventory Exporting to Bigquery](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery) feature, to keep track of your organization's assets over time storing information in Bigquery. Data stored in Bigquery can then be used for different purposes like dashboarding or analysis. + +
+ +## Workload identity federation for Terraform Enterprise workflow + + This [blueprint](./terraform-enterprise-wif) shows how to configure [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) between a [Terraform Cloud/Enterprise](https://developer.hashicorp.com/terraform/enterprise) instance and Google Cloud.
## TCP healthcheck for unmanaged GCE instances + This [blueprint](./unmanaged-instances-healthcheck) shows how to leverage [Serverless VPC Access](https://cloud.google.com/vpc/docs/configure-serverless-vpc-access) and Cloud Functions to organize a highly performant TCP healthcheck for unmanaged GCE instances.
+ +## Migrate for Compute Engine (v5) + + This set of [blueprints](./vm-migration) shows how to deploy Migrate for Compute Engine (v5) on top of existing Cloud Foundations in different scenarios. A blueprint on how to deploy the M4CE connector on VMWare ESXi is also part of the blueprints. + +
+ +## Configuring Workload Identity Federation from apps running on Azure + + This [blueprint](./workload-identity-federation) shows how to set up everything, both in Azure and Google Cloud, so a workload in Azure can access Google Cloud resources without a service account key. This will be possible by configuring workload identity federation to trust access tokens generated for a specific application in an Azure Active Directory (AAD) tenant. + +
diff --git a/blueprints/cloud-operations/adfs/README.md b/blueprints/cloud-operations/adfs/README.md index a690f1ea..0b954884 100644 --- a/blueprints/cloud-operations/adfs/README.md +++ b/blueprints/cloud-operations/adfs/README.md @@ -1,19 +1,19 @@ -# AD FS +# Active Directory Federation Services -This blueprint does the following: +This blueprint does the following: Terraform: - (Optional) Creates a project. - (Optional) Creates a VPC. - Sets up managed AD -- Creates a server where AD FS will be installed. This machine will also act as admin workstation for AD. +- Creates a server where AD FS will be installed. This machine will also act as admin workstation for AD. - Exposes AD FS using GLB. Ansible: - Installs the required Windows features and joins the computer to the AD domain. -- Provisions some tests users, groups and group memberships in AD. The data to provision is in the files directory of the ad-provisioning ansible role. There is script available in the scripts/ad-provisioning folder that you can use to generate an alternative users or memberships file. +- Provisions some tests users, groups and group memberships in AD. The data to provision is in the files directory of the ad-provisioning ansible role. There is script available in the scripts/ad-provisioning folder that you can use to generate an alternative users or memberships file. - Installs AD FS In addition to this, we also include a Powershell script that facilitates the configuration required for Anthos when authenticating users with AD FS as IdP. 
@@ -26,8 +26,8 @@ The diagram below depicts the architecture of the blueprint: Clone this repository or [open it in cloud shell](https://ssh.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fcloud-foundation-fabric&cloudshell_print=cloud-shell-readme.txt&cloudshell_working_dir=blueprints%2Fcloud-operations%2Fadfs), then go through the following steps to create resources: -* `terraform init` -* `terraform apply -var project_id=my-project-id -var ad_dns_domain_name=my-domain.org -var adfs_dns_domain_name=adfs.my-domain.org` +- `terraform init` +- `terraform apply -var project_id=my-project-id -var ad_dns_domain_name=my-domain.org -var adfs_dns_domain_name=adfs.my-domain.org` Once the resources have been created, do the following: diff --git a/blueprints/cloud-operations/network-dashboard/README.md b/blueprints/cloud-operations/network-dashboard/README.md index 4d62ed5f..5e640fdd 100644 --- a/blueprints/cloud-operations/network-dashboard/README.md +++ b/blueprints/cloud-operations/network-dashboard/README.md @@ -15,12 +15,14 @@ Three metric descriptors are created for each monitored resource: usage, limit a Clone this repository, then go through the following steps to create resources: - Create a terraform.tfvars file with the following content: - - organization_id = "" - - billing_account = "" - - monitoring_project_id = "project-0" # Monitoring project where the dashboard will be created and the solution deployed - - monitored_projects_list = ["project-1", "project2"] # Projects to be monitored by the solution - - monitored_folders_list = ["folder_id"] # Folders to be monitored by the solution - - v2 = true|false # Set to true to use V2 Cloud Functions environment + ```tfvars + organization_id = "" + billing_account = "" + monitoring_project_id = "project-0" # Monitoring project where the dashboard will be created and the solution deployed + monitored_projects_list = ["project-1", "project2"] # Projects to 
be monitored by the solution + monitored_folders_list = ["folder_id"] # Folders to be monitored by the solution + v2 = false # Set to true to use V2 Cloud Functions environment + ``` - `terraform init` - `terraform apply` diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/README.md new file mode 100644 index 00000000..4bb282c5 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/README.md @@ -0,0 +1,115 @@ +# Configuring workload identity federation for Terraform Cloud/Enterprise workflow + +The most common way to use Terraform Cloud for GCP deployments is to store a GCP Service Account Key as a part of TFE Workflow configuration, as we all know there are security risks due to the fact that keys are long term credentials that could be compromised. + +Workload identity federation enables applications running outside of Google Cloud to replace long-lived service account keys with short-lived access tokens. This is achieved by configuring Google Cloud to trust an external identity provider, so applications can use the credentials issued by the external identity provider to impersonate a service account. + +This blueprint shows how to set up [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) between [Terraform Cloud/Enterprise](https://developer.hashicorp.com/terraform/enterprise) instance and Google Cloud. This will be possible by configuring workload identity federation to trust oidc tokens generated for a specific workflow in a Terraform Enterprise organization. 
+ +The following diagram illustrates how the VM will get a short-lived access token and use it to access a resource: + + ![Sequence diagram](diagram.png) + +## Running the blueprint + +### Create Terraform Enterprise Workflow +If you don't have an existing Terraform Enterprise organization you can sign up for a [free trial](https://app.terraform.io/public/signup/account) account. + +Create a new Workspace for a `CLI-driven workflow` (Identity Federation will work for any workflow type, but for simplicity of the blueprint we use CLI driven workflow). + +Note workspace name and id (id starts with `ws-`), we will use them on a later stage. + +Go to the organization settings and note the org name and id (id starts with `org-`). + +### Deploy GCP Workload Identity Pool Provider for Terraform Enterprise + +> **_NOTE:_** This is a preparation part and should be executed on behalf of a user with enough permissions. + +Required permissions when new project is created: + - Project Creator on the parent folder/org. + + Required permissions when an existing project is used: + - Workload Identity Admin on the project level + - Project IAM Admin on the project level + +Fill out required variables, use TFE Org and Workspace IDs from the previous steps (IDs are not the names). +```bash +cd gcp-workload-identity-provider + +mv terraform.auto.tfvars.template terraform.auto.tfvars + +vi terraform.auto.tfvars +``` + +Authenticate using application default credentials, execute terraform code and deploy resources +``` +gcloud auth application-default login + +terraform init + +terraform apply +``` + +As a result a set of outputs will be provided (your values will be different), note the output since we will use it on the next steps. 
+ +``` +impersonate_service_account_email = "sa-tfe@fe-test-oidc.iam.gserviceaccount.com" +project_id = "tfe-test-oidc" +workload_identity_audience = "//iam.googleapis.com/projects/476538149566/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +workload_identity_pool_provider_id = "projects/476538149566/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +``` + +### Configure OIDC provider for your TFE Workflow + +To enable OIDC for a TFE workflow it's enough to setup an environment variable `TFC_WORKLOAD_IDENTITY_AUDIENCE`. + +Go to the Workflow -> Variables and add a new variable `TFC_WORKLOAD_IDENTITY_AUDIENCE` equal to the value of `workload_identity_audience` output, in our example it's: + +``` +TFC_WORKLOAD_IDENTITY_AUDIENCE = "//iam.googleapis.com/projects/476538149566/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +``` + +At that point we setup GCP Identity Federation to trust TFE generated OIDC tokens, so the TFE workflow can use the token to impersonate a GCP Service Account. + +## Testing the blueprint + +In order to test the setup we will deploy a GCS bucket from TFE Workflow using OIDC token for Service Account Impersonation. + +### Configure backend and variables + +First, we need to configure TFE Remote backend for our testing terraform code, use TFE Organization name and workspace name (names are not the same as ids) + +``` +cd ../tfc-workflow-using-wif + +mv backend.tf.template backend.tf + + +vi backend.tf + +``` + +Fill out variables based on the output from the preparation steps: + +``` +mv terraform.auto.tfvars.template terraform.auto.tfvars + +vi terraform.auto.tfvars + +``` + +### Authenticate terraform for triggering CLI-driven workflow + +Follow this [documentation](https://learn.hashicorp.com/tutorials/terraform/cloud-login) to login to terraform cloud from the CLI. 
+ +### Trigger the workflow + +``` +terraform init + +terraform apply +``` + +As a result we have a successfully deployed GCS bucket from Terraform Enterprise workflow using Workload Identity Federation. + +Once done testing, you can clean up resources by running `terraform destroy` first in the `tfc-workflow-using-wif` and then `gcp-workload-identity-provider` folders. diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/diagram.png b/blueprints/cloud-operations/terraform-enterprise-wif/diagram.png new file mode 100644 index 00000000..d4e6f82e Binary files /dev/null and b/blueprints/cloud-operations/terraform-enterprise-wif/diagram.png differ diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/README.md new file mode 100644 index 00000000..40e00f86 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/README.md @@ -0,0 +1,33 @@ +# GCP Workload Identity Provider for Terraform Enterprise + +This terraform code is a part of [GCP Workload Identity Federation for Terraform Enterprise](../) blueprint. + +The codebase provisions the following list of resources: + +- GCS Bucket + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [billing_account](variables.tf#L16) | Billing account id used as default for new projects. | string | ✓ | | +| [project_id](variables.tf#L38) | Existing project id. | string | ✓ | | +| [tfe_organization_id](variables.tf#L43) | | | ✓ | | +| [tfe_workspace_id](variables.tf#L48) | | | ✓ | | +| [issuer_uri](variables.tf#L65) | Terraform Enterprise uri. Replace the uri if a self hosted instance is used. | string | | "https://app.terraform.io/" | +| [parent](variables.tf#L27) | Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format. 
| string | | null | +| [project_create](variables.tf#L21) | Create project instead of using an existing one. | bool | | true | +| [workload_identity_pool_id](variables.tf#L53) | Workload identity pool id. | string | | "tfe-pool" | +| [workload_identity_pool_provider_id](variables.tf#L59) | Workload identity pool provider id. | string | | "tfe-provider" | + +## Outputs + +| name | description | sensitive | +|---|---|:---:| +| [impersonate_service_account_email](outputs.tf#L31) | | | +| [project_id](outputs.tf#L16) | | | +| [workload_identity_audience](outputs.tf#L26) | | | +| [workload_identity_pool_provider_id](outputs.tf#L21) | GCP workload identity pool provider ID. | | + + diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/main.tf b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/main.tf new file mode 100644 index 00000000..5ced2e3c --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/main.tf @@ -0,0 +1,91 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +############################################################################### +# GCP PROJECT # +############################################################################### + +module "project" { + source = "../../../../modules/project" + name = var.project_id + project_create = var.project_create + parent = var.parent + billing_account = var.billing_account + services = [ + "iam.googleapis.com", + "cloudresourcemanager.googleapis.com", + "iamcredentials.googleapis.com", + "sts.googleapis.com", + "storage.googleapis.com" + ] +} + +############################################################################### +# Workload Identity Pool and Provider # +############################################################################### + +resource "google_iam_workload_identity_pool" "tfe-pool" { + project = module.project.project_id + workload_identity_pool_id = var.workload_identity_pool_id + display_name = "TFE Pool" + description = "Identity pool for Terraform Enterprise OIDC integration" +} + +resource "google_iam_workload_identity_pool_provider" "tfe-pool-provider" { + project = module.project.project_id + workload_identity_pool_id = google_iam_workload_identity_pool.tfe-pool.workload_identity_pool_id + workload_identity_pool_provider_id = var.workload_identity_pool_provider_id + display_name = "TFE Pool Provider" + description = "OIDC identity pool provider for TFE Integration" + # Use condition to make sure only token generated for a specific TFE Org can be used across org workspaces + attribute_condition = "attribute.terraform_organization_id == \"${var.tfe_organization_id}\"" + attribute_mapping = { + "google.subject" = "assertion.sub" + "attribute.aud" = "assertion.aud" + "attribute.terraform_run_phase" = "assertion.terraform_run_phase" + "attribute.terraform_workspace_id" = "assertion.terraform_workspace_id" + "attribute.terraform_workspace_name" = "assertion.terraform_workspace_name" + "attribute.terraform_organization_id" = 
"assertion.terraform_organization_id" + "attribute.terraform_organization_name" = "assertion.terraform_organization_name" + "attribute.terraform_run_id" = "assertion.terraform_run_id" + "attribute.terraform_full_workspace" = "assertion.terraform_full_workspace" + } + oidc { + # Should be different if self hosted TFE instance is used + issuer_uri = var.issuer_uri + } +} + +############################################################################### +# Service Account and IAM bindings # +############################################################################### + +module "sa-tfe" { + source = "../../../../modules/iam-service-account" + project_id = module.project.project_id + name = "sa-tfe" + + iam = { + # We allow only tokens generated by a specific TFE workspace impersonation of the service account, + # that way one identity pool can be used for a TFE Organization, but every workspace will be able to impersonate only a specifc SA + "roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.tfe-pool.name}/attribute.terraform_workspace_id/${var.tfe_workspace_id}"] + } + + iam_project_roles = { + "${module.project.project_id}" = [ + "roles/storage.admin" + ] + } +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/outputs.tf b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/outputs.tf new file mode 100644 index 00000000..79cea39a --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/outputs.tf @@ -0,0 +1,34 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +output "project_id" { + description = "GCP Project ID." + value = module.project.project_id +} + +output "workload_identity_pool_provider_id" { + description = "GCP workload identity pool provider ID." + value = google_iam_workload_identity_pool_provider.tfe-pool-provider.name +} + +output "workload_identity_audience" { + description = "TFC Workload Identity Audience." + value = "//iam.googleapis.com/${google_iam_workload_identity_pool_provider.tfe-pool-provider.name}" +} + +output "impersonate_service_account_email" { + description = "Service account to be impersonated by workload identity." + value = module.sa-tfe.email +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/terraform.auto.tfvars.template b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/terraform.auto.tfvars.template new file mode 100644 index 00000000..645eea0b --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/terraform.auto.tfvars.template @@ -0,0 +1,20 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +parent = "folders/437102807785" +project_id = "my-project-id" +tfe_organization_id = "org-W3bz9neazHrZz99U" +tfe_workspace_id = "ws-DFxEE3NmeMdaAvoK" +billing_account = "015617-1B8CBC-AF10D9" diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/variables.tf b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/variables.tf new file mode 100644 index 00000000..62163d17 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/variables.tf @@ -0,0 +1,69 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +variable "billing_account" { + description = "Billing account id used as default for new projects." + type = string +} + +variable "project_create" { + description = "Create project instead of using an existing one." + type = bool + default = true +} + +variable "parent" { + description = "Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format." + type = string + default = null + validation { + condition = var.parent == null || can(regex("(organizations|folders)/[0-9]+", var.parent)) + error_message = "Parent must be of the form folders/folder_id or organizations/organization_id." + } +} + + +variable "project_id" { + description = "Existing project id." 
+ type = string +} + +variable "tfe_organization_id" { + description = "TFE organization id." + type = string +} + +variable "tfe_workspace_id" { + description = "TFE workspace id." + type = string +} + +variable "workload_identity_pool_id" { + description = "Workload identity pool id." + type = string + default = "tfe-pool" +} + +variable "workload_identity_pool_provider_id" { + description = "Workload identity pool provider id." + type = string + default = "tfe-provider" +} + +variable "issuer_uri" { + description = "Terraform Enterprise uri. Replace the uri if a self hosted instance is used." + type = string + default = "https://app.terraform.io/" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/README.md new file mode 100644 index 00000000..5226dd64 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/README.md @@ -0,0 +1,19 @@ +# GCP Workload Identity Provider for Terraform Enterprise + +This terraform code is a part of [GCP Workload Identity Federation for Terraform Enterprise](../) blueprint. For instructions please refer to the blueprint [readme](../README.md). + +The codebase provisions the following list of resources: + +- GCS Bucket + + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [impersonate_service_account_email](variables.tf#L26) | | | ✓ | | +| [project_id](variables.tf#L16) | | | ✓ | | +| [workload_identity_pool_provider_id](variables.tf#L21) | GCP workload identity pool provider ID. 
| string | ✓ | | + + diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/backend.tf.template b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/backend.tf.template new file mode 100644 index 00000000..87d4737d --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/backend.tf.template @@ -0,0 +1,29 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# The block below configures Terraform to use the 'remote' backend with Terraform Cloud. +# For more information, see https://www.terraform.io/docs/backends/types/remote.html + +terraform { + backend "remote" { + organization = "" + + workspaces { + name = "" + } + } + + required_version = ">= 0.14.0" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/main.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/main.tf new file mode 100644 index 00000000..5e03ada5 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/main.tf @@ -0,0 +1,25 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +############################################################################### +# TEST RESOURCE TO VALIDATE WIF # +############################################################################### + +resource "google_storage_bucket" "test-bucket" { + project = var.project_id + name = "${var.project_id}-tfe-oidc-test-bucket" + location = "US" + force_destroy = true +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/provider.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/provider.tf new file mode 100644 index 00000000..47f24620 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/provider.tf @@ -0,0 +1,25 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +module "tfe_oidc" { + source = "./tfc-oidc" + + workload_identity_pool_provider_id = var.workload_identity_pool_provider_id + impersonate_service_account_email = var.impersonate_service_account_email +} + +provider "google" { + credentials = module.tfe_oidc.credentials +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/terraform.auto.tfvars.template b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/terraform.auto.tfvars.template new file mode 100644 index 00000000..efea4cc9 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/terraform.auto.tfvars.template @@ -0,0 +1,17 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project_id = "tfe-oidc-workflow" +workload_identity_pool_provider_id = "projects/683987109094/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +impersonate_service_account_email = "sa-tfe@tfe-oidc-workflow2.iam.gserviceaccount.com" diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/README.md new file mode 100644 index 00000000..bb8d7983 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/README.md @@ -0,0 +1,40 @@ +# Terraform Enterprise OIDC Credential for GCP Workload Identity Federation + +This is a helper module to prepare GCP Credentials from Terraform Enterprise workload identity token. For more information see [Terraform Enterprise Workload Identity Federation](../) blueprint. + +## Example +```hcl +module "tfe_oidc" { + source = "./tfe_oidc" + + workload_identity_pool_provider_id = "projects/683987109094/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" + impersonate_service_account_email = "tfe-test@tfe-test-wif.iam.gserviceaccount.com" +} + +provider "google" { + credentials = module.tfe_oidc.credentials +} + +provider "google-beta" { + credentials = module.tfe_oidc.credentials +} + +# tftest skip +``` + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [impersonate_service_account_email](variables.tf#L22) | Service account to be impersonated by workload identity federation. | string | ✓ | | +| [workload_identity_pool_provider_id](variables.tf#L17) | GCP workload identity pool provider ID. | string | ✓ | | +| [tmp_oidc_token_path](variables.tf#L27) | Name of the temporary file where TFC OIDC token will be stored to authentificate terraform provider google. 
| string | | ".oidc_token" | + +## Outputs + +| name | description | sensitive | +|---|---|:---:| +| [credentials](outputs.tf#L17) | | | + + diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/main.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/main.tf new file mode 100644 index 00000000..2c510a6a --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/main.tf @@ -0,0 +1,23 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + audience = "//iam.googleapis.com/${var.workload_identity_pool_provider_id}" +} + +data "external" "oidc_token_file" { + program = ["bash", "${path.module}/write_token.sh", "${var.tmp_oidc_token_path}"] +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/outputs.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/outputs.tf new file mode 100644 index 00000000..fbcea8c2 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/outputs.tf @@ -0,0 +1,26 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "credentials" { + value = jsonencode({ + "type" : "external_account", + "audience" : "${local.audience}", + "subject_token_type" : "urn:ietf:params:oauth:token-type:jwt", + "token_url" : "https://sts.googleapis.com/v1/token", + "credential_source" : data.external.oidc_token_file.result + "service_account_impersonation_url" : "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/${var.impersonate_service_account_email}:generateAccessToken" + }) +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/variables.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/variables.tf new file mode 100644 index 00000000..06f310da --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/variables.tf @@ -0,0 +1,31 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "workload_identity_pool_provider_id" { + description = "GCP workload identity pool provider ID." + type = string +} + +variable "impersonate_service_account_email" { + description = "Service account to be impersonated by workload identity federation." + type = string +} + +variable "tmp_oidc_token_path" { + description = "Name of the temporary file where TFC OIDC token will be stored to authentificate terraform provider google." + type = string + default = ".oidc_token" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/versions.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/versions.tf new file mode 100644 index 00000000..a079e99c --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/versions.tf @@ -0,0 +1,17 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +terraform { + required_version = ">= 1.3.1" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/write_token.sh b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/write_token.sh new file mode 100644 index 00000000..2f7e30a2 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/write_token.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Exit if any of the intermediate steps fail +set -e + +FILENAME=$@ + +echo $TFC_WORKLOAD_IDENTITY_TOKEN > $FILENAME + +echo -n "{\"file\":\"${FILENAME}\"}" diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/variables.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/variables.tf new file mode 100644 index 00000000..3f36c2ca --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/variables.tf @@ -0,0 +1,29 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +variable "project_id" { + description = "GCP project ID." + type = string +} + +variable "workload_identity_pool_provider_id" { + description = "GCP workload identity pool provider ID." + type = string +} + +variable "impersonate_service_account_email" { + description = "Service account to be impersonated by workload identity." + type = string +} diff --git a/blueprints/cloud-operations/workload-identity-federation/README.md b/blueprints/cloud-operations/workload-identity-federation/README.md index fb990342..ad6feaed 100644 --- a/blueprints/cloud-operations/workload-identity-federation/README.md +++ b/blueprints/cloud-operations/workload-identity-federation/README.md @@ -1,9 +1,9 @@ -# Configuring workload identity federation to access Google Cloud resources from apps running on Azure +# Configuring Workload Identity Federation to access Google Cloud resources from apps running on Azure The most straightforward way for workloads running outside of Google Cloud to call Google Cloud APIs is by using a downloaded service account key. However, this approach has 2 major pain points: * A management hassle, keys need to be stored securely and rotated often. -* A security risk, keys are long term credentials that could be compromised. +* A security risk, keys are long term credentials that could be compromised. Workload identity federation enables applications running outside of Google Cloud to replace long-lived service account keys with short-lived access tokens. 
This is achieved by configuring Google Cloud to trust an external identity provider, so applications can use the credentials issued by the external identity provider to impersonate a service account. @@ -19,17 +19,17 @@ The provided terraform configuration will set up the following architecture: * On Azure: - * An Azure Active Directory application and a service principal. By default, the new application grants all users in the Azure AD tenant permission to obtain access tokens. So an app role assignment will be required to restrict which identities can obtain access tokens for the application. + * An Azure Active Directory application and a service principal. By default, the new application grants all users in the Azure AD tenant permission to obtain access tokens. So an app role assignment will be required to restrict which identities can obtain access tokens for the application. - * Optionally, all the resources required to have a VM configured to run with a system-assigned managed identity and accessible via SSH on a public IP using public key authentication, so we can log in to the machine and run the `gcloud` command to verify that everything works as expected. + * Optionally, all the resources required to have a VM configured to run with a system-assigned managed identity and accessible via SSH on a public IP using public key authentication, so we can log in to the machine and run the `gcloud` command to verify that everything works as expected. * On Google Cloud: - * A Google Cloud project with: + * A Google Cloud project with: - * A workload identity pool and provider configured to trust the AAD application + * A workload identity pool and provider configured to trust the AAD application - * A service account with the Viewer role granted on the project. The external identities in the workload identity pool would be assigned the Workload Identity User role on that service account. + * A service account with the Viewer role granted on the project. 
The external identities in the workload identity pool would be assigned the Workload Identity User role on that service account. ## Running the blueprint @@ -42,7 +42,7 @@ Clone this repository or [open it in cloud shell](https://ssh.cloud.google.com/c Once the resources have been created, do the following to verify that everything works as expected: -1. Log in to the VM. +1. Log in to the VM. If you have created the VM using this terraform configuration proceed the following way: @@ -72,7 +72,6 @@ Once the resources have been created, do the following to verify that everything `gcloud projects describe PROJECT_ID` - Once done testing, you can clean up resources by running `terraform destroy`. diff --git a/blueprints/data-solutions/README.md b/blueprints/data-solutions/README.md index 968d7b9c..4919f29a 100644 --- a/blueprints/data-solutions/README.md +++ b/blueprints/data-solutions/README.md @@ -6,32 +6,32 @@ They are meant to be used as minimal but complete starting points to create actu ## Blueprints +### Cloud SQL instance with multi-region read replicas + + +This [blueprint](./cloudsql-multiregion/) creates a [Cloud SQL instance](https://cloud.google.com/sql) with multi-region read replicas as described in the [Cloud SQL for PostgreSQL disaster recovery](https://cloud.google.com/architecture/cloud-sql-postgres-disaster-recovery-complete-failover-fallback) article. + +
+ ### GCE and GCS CMEK via centralized Cloud KMS This [blueprint](./cmek-via-centralized-kms/) implements [CMEK](https://cloud.google.com/kms/docs/cmek) for GCS and GCE, via keys hosted in KMS running in a centralized project. The blueprint shows the basic resources and permissions for the typical use case of application projects implementing encryption at rest via a centrally managed KMS service. +
-### Cloud Storage to Bigquery with Cloud Dataflow with least privileges +### Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key + + +This [blueprint](./composer-2/) creates a [Cloud Composer](https://cloud.google.com/composer/) version 2 instance on a VPC with a dedicated service account. The solution supports as inputs: a Shared VPC and Cloud KMS CMEK keys. - This [blueprint](./gcs-to-bq-with-least-privileges/) implements resources required to run GCS to BigQuery Dataflow pipelines. The solution rely on a set of Services account created with the least privileges principle.
### Data Platform Foundations This [blueprint](./data-platform-foundations/) implements a robust and flexible Data Foundation on GCP that provides opinionated defaults, allowing customers to build and scale out additional data pipelines quickly and reliably. -
-### SQL Server Always On Availability Groups - - -This [blueprint](./data-platform-foundations/) implements SQL Server Always On Availability Groups using Fabric modules. It builds a two node cluster with a fileshare witness instance in an existing VPC and adds the necessary firewalling. The actual setup process (apart from Active Directory operations) has been scripted, so that least amount of manual works needs to performed. -
- -### Cloud SQL instance with multi-region read replicas - - -This [blueprint](./cloudsql-multiregion/) creates a [Cloud SQL instance](https://cloud.google.com/sql) with multi-region read replicas as described in the [Cloud SQL for PostgreSQL disaster recovery](https://cloud.google.com/architecture/cloud-sql-postgres-disaster-recovery-complete-failover-fallback) article.
### Data Playground starter with Cloud Vertex AI Notebook and GCS @@ -40,11 +40,18 @@ This [blueprint](./cloudsql-multiregion/) creates a [Cloud SQL instance](https:/ This [blueprint](./data-playground/) creates a [Vertex AI Notebook](https://cloud.google.com/vertex-ai/docs/workbench/introduction) running on a VPC with a private IP and a dedicated Service Account. A GCS bucket and a BigQuery dataset are created to store inputs and outputs of data experiments. +
-### Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key +### Cloud Storage to Bigquery with Cloud Dataflow with least privileges - -This [blueprint](./composer-2/) creates a [Cloud Composer](https://cloud.google.com/composer/) version 2 instance on a VPC with a dedicated service account. The solution supports as inputs: a Shared VPC and Cloud KMS CMEK keys. -
\ No newline at end of file + This [blueprint](./gcs-to-bq-with-least-privileges/) implements resources required to run GCS to BigQuery Dataflow pipelines. The solution relies on a set of service accounts created with the least privileges principle. + +</div>
+ +### SQL Server Always On Availability Groups + + This [blueprint](./data-platform-foundations/) implements SQL Server Always On Availability Groups using Fabric modules. It builds a two node cluster with a fileshare witness instance in an existing VPC and adds the necessary firewalling. The actual setup process (apart from Active Directory operations) has been scripted, so that the least amount of manual work needs to be performed. + +</div>
diff --git a/blueprints/data-solutions/cloudsql-multiregion/README.md b/blueprints/data-solutions/cloudsql-multiregion/README.md index babacd58..5bdc6329 100644 --- a/blueprints/data-solutions/cloudsql-multiregion/README.md +++ b/blueprints/data-solutions/cloudsql-multiregion/README.md @@ -39,7 +39,7 @@ If `project_create` is left to `null`, the identity performing the deployment ne Click on the image below, sign in if required and when the prompt appears, click on “confirm”. -[![Open Cloudshell](images/button.png)](https://goo.gle/GoCloudSQL) +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoCloudSQL) This will clone the repository to your cloud shell and a screen like this one will appear: @@ -81,7 +81,8 @@ This implementation is intentionally minimal and easy to read. A real world use - Using VPC-SC to mitigate data exfiltration ### Shared VPC -The example supports the configuration of a Shared VPC as an input variable. + +The example supports the configuration of a Shared VPC as an input variable. To deploy the solution on a Shared VPC, you have to configure the `network_config` variable: ``` @@ -94,12 +95,14 @@ network_config = { ``` To run this example, the Shared VPC project needs to have: - - A Private Service Connect with a range of `/24` (example: `10.60.0.0/24`) to deploy the Cloud SQL instance. - - Internet access configured (for example Cloud NAT) to let the Test VM download packages. + +- A Private Service Connect with a range of `/24` (example: `10.60.0.0/24`) to deploy the Cloud SQL instance. +- Internet access configured (for example Cloud NAT) to let the Test VM download packages. In order to run the example and deploy Cloud SQL on a shared VPC the identity running Terraform must have the following IAM role on the Shared VPC Host project. 
- - Compute Network Admin (roles/compute.networkAdmin) - - Compute Shared VPC Admin (roles/compute.xpnAdmin) + +- Compute Network Admin (roles/compute.networkAdmin) +- Compute Shared VPC Admin (roles/compute.xpnAdmin) ## Test your environment diff --git a/blueprints/data-solutions/cloudsql-multiregion/images/button.png b/blueprints/data-solutions/cloudsql-multiregion/images/button.png deleted file mode 100644 index 21a3f3de..00000000 Binary files a/blueprints/data-solutions/cloudsql-multiregion/images/button.png and /dev/null differ diff --git a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md index 6025ad7f..915ada21 100644 --- a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md +++ b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md @@ -60,8 +60,7 @@ __Note__: To grant a user a role, take a look at the [Granting and Revoking Acce Click on the button below, sign in if required and when the prompt appears, click on “confirm”. 
- -[![Open Cloudshell](images/shell_button.png)](https://goo.gle/GoDataPipe) +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoDataPipe) This will clone the repository to your cloud shell and a screen like this one will appear: @@ -146,13 +145,13 @@ Once this is done, the 3 files necessary to run the Dataflow Job will have been Run the following command to start the dataflow job: - gcloud --impersonate-service-account=orchestrator@$SERVICE_PROJECT_ID.iam.gserviceaccount.com dataflow jobs run test_batch_01 \ + gcloud --impersonate-service-account=orchestrator@$SERVICE_PROJECT_ID.iam.gserviceaccount.com dataflow jobs run test_batch_01 \ --gcs-location gs://dataflow-templates/latest/GCS_Text_to_BigQuery \ --project $SERVICE_PROJECT_ID \ --region europe-west1 \ --disable-public-ips \ --subnetwork https://www.googleapis.com/compute/v1/projects/$SERVICE_PROJECT_ID/regions/europe-west1/subnetworks/subnet \ - --staging-location gs://$PREFIX-df-tmp\ + --staging-location gs://$PREFIX-df-tmp \ --service-account-email df-loading@$SERVICE_PROJECT_ID.iam.gserviceaccount.com \ --parameters \ javascriptTextTransformFunctionName=transform,\ diff --git a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/images/cloud_shell.png b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/images/cloud_shell.png deleted file mode 100644 index 21bb72e0..00000000 Binary files a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/images/cloud_shell.png and /dev/null differ diff --git a/blueprints/gke/README.md b/blueprints/gke/README.md index a2c48071..30418ca4 100644 --- a/blueprints/gke/README.md +++ b/blueprints/gke/README.md @@ -6,6 +6,18 @@ They are meant to be used as minimal but complete starting points to create actu ## Blueprints +### Binary Authorization Pipeline + + This [blueprint](../gke/binauthz/) shows how to create a CI and a CD pipeline in Cloud Build for the deployment of an application to a private GKE cluster with 
unrestricted access to a public endpoint. The blueprint enables a Binary Authorization policy in the project so only images that have been attested can be deployed to the cluster. The attestations are created using a cryptographic key pair that has been provisioned in KMS. + +
+ +### Multi-cluster mesh on GKE (fleet API) + + This [blueprint](../gke/multi-cluster-mesh-gke-fleet-api/) shows how to create a multi-cluster mesh for two private clusters on GKE. Anthos Service Mesh with automatic control plane management is set up for clusters using the Fleet API. This can only be done if the clusters are in a single project and in the same VPC. In this particular case both clusters have been deployed to different subnets in a shared VPC. + +</div>
+ ### Multitenant GKE fleet This [blueprint](./multitenant-fleet/) allows simple centralized management of similar sets of GKE clusters and their nodepools in a single project, and optional fleet management via GKE Hub templated configurations. @@ -16,14 +28,5 @@ They are meant to be used as minimal but complete starting points to create actu This [blueprint](../networking/shared-vpc-gke/) shows how to configure a Shared VPC, including the specific IAM configurations needed for GKE, and to give different level of access to the VPC subnets to different identities. It is meant to be used as a starting point for most Shared VPC configurations, and to be integrated to the above blueprints where Shared VPC is needed in more complex network topologies. -
- -### Binary Authorization Pipeline - - This [blueprint](../gke/binauthz/) shows how to create a CI and a CD pipeline in Cloud Build for the deployment of an application to a private GKE cluster with unrestricted access to a public endpoint. The blueprint enables a Binary Authorization policy in the project so only images that have been attested can be deployed to the cluster. The attestations are created using a cryptographic key pair that has been provisioned in KMS. -
- -### Multi-cluster mesh on GKE (fleet API) - - This [blueprint](../gke/multi-cluster-mesh-gke-fleet-api/) shows how to create a multi-cluster mesh for two private clusters on GKE. Anthos Service Mesh with automatic control plane management is set up for clusters using the Fleet API. This can only be done if the clusters are in a single project and in the same VPC. In this particular case both clusters having being deployed to different subnets in a shared VPC. +
diff --git a/blueprints/gke/binauthz/main.tf b/blueprints/gke/binauthz/main.tf index 79323943..0c3655e4 100644 --- a/blueprints/gke/binauthz/main.tf +++ b/blueprints/gke/binauthz/main.tf @@ -99,13 +99,15 @@ module "cluster" { } module "cluster_nodepool" { - source = "../../../modules/gke-nodepool" - project_id = module.project.project_id - cluster_name = module.cluster.name - location = var.zone - name = "nodepool" - service_account = {} - node_count = { initial = 3 } + source = "../../../modules/gke-nodepool" + project_id = module.project.project_id + cluster_name = module.cluster.name + location = var.zone + name = "nodepool" + service_account = { + create = true + } + node_count = { initial = 3 } } module "kms" { diff --git a/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf b/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf index 73ab19b1..6c769d92 100644 --- a/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf +++ b/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf @@ -44,15 +44,17 @@ module "clusters" { } module "cluster_nodepools" { - for_each = var.clusters_config - source = "../../../modules/gke-nodepool" - project_id = module.fleet_project.project_id - cluster_name = module.clusters[each.key].name - location = var.region - name = "nodepool-${each.key}" - node_count = { initial = 1 } - service_account = {} - tags = ["${each.key}-node"] + for_each = var.clusters_config + source = "../../../modules/gke-nodepool" + project_id = module.fleet_project.project_id + cluster_name = module.clusters[each.key].name + location = var.region + name = "nodepool-${each.key}" + node_count = { initial = 1 } + service_account = { + create = true + } + tags = ["${each.key}-node"] } module "hub" { diff --git a/blueprints/gke/multitenant-fleet/README.md b/blueprints/gke/multitenant-fleet/README.md index ab8c6247..bd6df945 100644 --- a/blueprints/gke/multitenant-fleet/README.md +++ b/blueprints/gke/multitenant-fleet/README.md @@ -246,20 +246,20 @@ module "gke" 
{ | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [billing_account_id](variables.tf#L17) | Billing account id. | string | ✓ | | -| [folder_id](variables.tf#L129) | Folder used for the GKE project in folders/nnnnnnnnnnn format. | string | ✓ | | -| [prefix](variables.tf#L176) | Prefix used for resources that need unique names. | string | ✓ | | -| [project_id](variables.tf#L181) | ID of the project that will contain all the clusters. | string | ✓ | | -| [vpc_config](variables.tf#L193) | Shared VPC project and VPC details. | object({…}) | ✓ | | -| [clusters](variables.tf#L22) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | -| [fleet_configmanagement_clusters](variables.tf#L67) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | -| [fleet_configmanagement_templates](variables.tf#L74) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | -| [fleet_features](variables.tf#L109) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | -| [fleet_workload_identity](variables.tf#L122) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | -| [group_iam](variables.tf#L134) | Project-level IAM bindings for groups. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | -| [iam](variables.tf#L141) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | -| [labels](variables.tf#L148) | Project-level labels. | map(string) | | {} | -| [nodepools](variables.tf#L154) | Nodepools configuration. Refer to the gke-nodepool module for type details. 
| map(map(object({…}))) | | {} | -| [project_services](variables.tf#L186) | Additional project services to enable. | list(string) | | [] | +| [folder_id](variables.tf#L132) | Folder used for the GKE project in folders/nnnnnnnnnnn format. | string | ✓ | | +| [prefix](variables.tf#L179) | Prefix used for resources that need unique names. | string | ✓ | | +| [project_id](variables.tf#L184) | ID of the project that will contain all the clusters. | string | ✓ | | +| [vpc_config](variables.tf#L196) | Shared VPC project and VPC details. | object({…}) | ✓ | | +| [clusters](variables.tf#L22) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | +| [fleet_configmanagement_clusters](variables.tf#L70) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | +| [fleet_configmanagement_templates](variables.tf#L77) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | +| [fleet_features](variables.tf#L112) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | +| [fleet_workload_identity](variables.tf#L125) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | +| [group_iam](variables.tf#L137) | Project-level IAM bindings for groups. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | +| [iam](variables.tf#L144) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | +| [labels](variables.tf#L151) | Project-level labels. | map(string) | | {} | +| [nodepools](variables.tf#L157) | Nodepools configuration. Refer to the gke-nodepool module for type details. 
| map(map(object({…}))) | | {} | +| [project_services](variables.tf#L189) | Additional project services to enable. | list(string) | | [] | ## Outputs diff --git a/blueprints/gke/multitenant-fleet/variables.tf b/blueprints/gke/multitenant-fleet/variables.tf index d0464298..8d6c69ae 100644 --- a/blueprints/gke/multitenant-fleet/variables.tf +++ b/blueprints/gke/multitenant-fleet/variables.tf @@ -39,9 +39,12 @@ variable "clusters" { recurring_window = null maintenance_exclusion = [] }) - max_pods_per_node = optional(number, 110) - min_master_version = optional(string) - monitoring_config = optional(list(string), ["SYSTEM_COMPONENTS"]) + max_pods_per_node = optional(number, 110) + min_master_version = optional(string) + monitoring_config = optional(object({ + enable_components = optional(list(string), ["SYSTEM_COMPONENTS"]) + managed_prometheus = optional(bool) + })) node_locations = optional(list(string)) private_cluster_config = optional(any) release_channel = optional(string) diff --git a/blueprints/networking/README.md b/blueprints/networking/README.md index e234cc25..c4a3d2f0 100644 --- a/blueprints/networking/README.md +++ b/blueprints/networking/README.md @@ -6,11 +6,30 @@ They are meant to be used as minimal but complete starting points to create actu ## Blueprints +### Decentralized firewall management + + This [blueprint](./decentralized-firewall/) shows how a decentralized firewall management can be organized using the [firewall factory](../factories/net-vpc-firewall-yaml/). + +
+ +### Network filtering with Squid + + This [blueprint](./filtering-proxy/) shows how to deploy a filtering HTTP proxy to restrict Internet access, in a simplified setup using a VPC with two subnets and a Cloud DNS zone, and an optional MIG for scaling. + +</div>
+ +### HTTP Load Balancer with Cloud Armor + + This [blueprint](./glb-and-armor/) contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. + +</div>
+ ### Hub and Spoke via Peering This [blueprint](./hub-and-spoke-peering/) implements a hub and spoke topology via VPC peering, a common design where a landing zone VPC (hub) is connected to on-premises, and then peered with satellite VPCs (spokes) to further partition the infrastructure. The sample highlights the lack of transitivity in peering: the absence of connectivity between spokes, and the need create workarounds for private service access to managed services. One such workaround is shown for private GKE, allowing access from hub and all spokes to GKE masters via a dedicated VPN. +
### Hub and Spoke via Dynamic VPN @@ -18,6 +37,19 @@ The sample highlights the lack of transitivity in peering: the absence of connec This [blueprint](./hub-and-spoke-vpn/) implements a hub and spoke topology via dynamic VPN tunnels, a common design where peering cannot be used due to limitations on the number of spokes or connectivity to managed services. The blueprint shows how to implement spoke transitivity via BGP advertisements, how to expose hub DNS zones to spokes via DNS peering, and allows easy testing of different VPN and BGP configurations. + +
+ +### ILB as next hop + + This [blueprint](./ilb-next-hop/) allows testing [ILB as next hop](https://cloud.google.com/load-balancing/docs/internal/ilb-next-hop-overview) using simple Linux gateway VMS between two VPCs, to emulate virtual appliances. An optional additional ILB can be enabled to test multiple load balancer configurations and hashing. + +
+ +### Nginx-based reverse proxy cluster + + This [blueprint](./nginx-reverse-proxy-cluster/) shows how to deploy an autoscaling reverse proxy cluster using Nginx, based on regional Managed Instance Groups. The autoscaling is driven by Nginx current connections metric, sent by Cloud Ops Agent. +</div>
### DNS and Private Access for On-premises @@ -25,6 +57,19 @@ The blueprint shows how to implement spoke transitivity via BGP advertisements, This [blueprint](./onprem-google-access-dns/) uses an emulated on-premises environment running in Docker containers inside a GCE instance, to allow testing specific features like DNS policies, DNS forwarding zones across VPN, and Private Access for On-premises hosts. The emulated on-premises environment can be used to test access to different services from outside Google Cloud, by implementing a VPN connection and BGP to Google CLoud via Strongswan and Bird. + +
+ +### Calling a private Cloud Function from on-premises + + This [blueprint](./private-cloud-function-from-onprem/) shows how to invoke a [private Google Cloud Function](https://cloud.google.com/functions/docs/networking/network-settings) from the on-prem environment via a [Private Service Connect endpoint](https://cloud.google.com/vpc/docs/private-service-connect#benefits-apis). + +
+ +### Calling on-premise services through PSC and hybrid NEGs + + This [blueprint](./psc-hybrid/) shows how to privately connect to on-premise services (IP + port) from GCP, leveraging [Private Service Connect (PSC)](https://cloud.google.com/vpc/docs/private-service-connect) and [Hybrid Network Endpoint Groups](https://cloud.google.com/load-balancing/docs/negs/hybrid-neg-concepts). +
### Shared VPC with GKE and per-subnet support @@ -32,24 +77,5 @@ The emulated on-premises environment can be used to test access to different ser This [blueprint](./shared-vpc-gke/) shows how to configure a Shared VPC, including the specific IAM configurations needed for GKE, and to give different level of access to the VPC subnets to different identities. It is meant to be used as a starting point for most Shared VPC configurations, and to be integrated to the above blueprints where Shared VPC is needed in more complex network topologies. -
- -### ILB as next hop - - This [blueprint](./ilb-next-hop/) allows testing [ILB as next hop](https://cloud.google.com/load-balancing/docs/internal/ilb-next-hop-overview) using simple Linux gateway VMS between two VPCs, to emulate virtual appliances. An optional additional ILB can be enabled to test multiple load balancer configurations and hashing. -
- -### Calling a private Cloud Function from on-premises - - This [blueprint](./private-cloud-function-from-onprem/) shows how to invoke a [private Google Cloud Function](https://cloud.google.com/functions/docs/networking/network-settings) from the on-prem environment via a [Private Service Connect endpoint](https://cloud.google.com/vpc/docs/private-service-connect#benefits-apis). -
- -### Calling on-premise services through PSC and hybrid NEGs - - This [blueprint](./psc-hybrid/) shows how to privately connect to on-premise services (IP + port) from GCP, leveraging [Private Service Connect (PSC)](https://cloud.google.com/vpc/docs/private-service-connect) and [Hybrid Network Endpoint Groups](https://cloud.google.com/load-balancing/docs/negs/hybrid-neg-concepts). -
- -### Decentralized firewall management - - This [blueprint](./decentralized-firewall/) shows how a decentralized firewall management can be organized using the [firewall factory](../factories/net-vpc-firewall-yaml/). +
diff --git a/blueprints/cloud-operations/glb_and_armor/README.md b/blueprints/networking/glb-and-armor/README.md similarity index 96% rename from blueprints/cloud-operations/glb_and_armor/README.md rename to blueprints/networking/glb-and-armor/README.md index 25ffec90..0c9a802e 100644 --- a/blueprints/cloud-operations/glb_and_armor/README.md +++ b/blueprints/networking/glb-and-armor/README.md @@ -2,7 +2,7 @@ ## Introduction -This repository contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. +This blueprint contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. This tutorial is general enough to fit in a variety of use-cases, from hosting a mobile app's backend to deploy proprietary workloads at scale. @@ -62,7 +62,7 @@ Note: To grant a user a role, take a look at the [Granting and Revoking Access]( Click on the button below, sign in if required and when the prompt appears, click on “confirm”. 
-[![Open Cloudshell](shell_button.png)](https://goo.gle/GoCloudArmor) +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoCloudArmor) This will clone the repository to your cloud shell and a screen like this one will appear: diff --git a/blueprints/cloud-operations/glb_and_armor/architecture.png b/blueprints/networking/glb-and-armor/architecture.png similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/architecture.png rename to blueprints/networking/glb-and-armor/architecture.png diff --git a/blueprints/cloud-operations/glb_and_armor/cloud_shell.png b/blueprints/networking/glb-and-armor/cloud_shell.png similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/cloud_shell.png rename to blueprints/networking/glb-and-armor/cloud_shell.png diff --git a/blueprints/cloud-operations/glb_and_armor/main.tf b/blueprints/networking/glb-and-armor/main.tf similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/main.tf rename to blueprints/networking/glb-and-armor/main.tf diff --git a/blueprints/cloud-operations/glb_and_armor/outputs.tf b/blueprints/networking/glb-and-armor/outputs.tf similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/outputs.tf rename to blueprints/networking/glb-and-armor/outputs.tf diff --git a/blueprints/cloud-operations/glb_and_armor/variables.tf b/blueprints/networking/glb-and-armor/variables.tf similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/variables.tf rename to blueprints/networking/glb-and-armor/variables.tf diff --git a/blueprints/networking/nginx-reverse-proxy-cluster/README.md b/blueprints/networking/nginx-reverse-proxy-cluster/README.md index c3101a15..b8436283 100644 --- a/blueprints/networking/nginx-reverse-proxy-cluster/README.md +++ b/blueprints/networking/nginx-reverse-proxy-cluster/README.md @@ -1,20 +1,17 @@ # Nginx-based reverse proxy cluster -This blueprint shows how to deploy an autoscaling 
reverse proxy cluster using Nginx, based on regional -Managed Instance Groups. +This blueprint shows how to deploy an autoscaling reverse proxy cluster using Nginx, based on regional Managed Instance Groups. ![High-level diagram](reverse-proxy.png "High-level diagram") -The autoscaling is driven by Nginx current connections metric, sent by Cloud Ops Agent. +The autoscaling is driven by Nginx current connections metric, sent by Cloud Ops Agent. -The example is for Nginx, but it could be easily adapted to any other reverse proxy software (eg. -Squid, Varnish, etc). +The example is for Nginx, but it could be easily adapted to any other reverse proxy software (eg. Squid, Varnish, etc). ## Ops Agent image -There is a simple [`Dockerfile`](Dockerfile) available for building Ops Agent to be run -inside the ContainerOS instance. Build the container, push it to your Container/Artifact -Repository and set the `ops_agent_image` to point to the image you built. +There is a simple [`Dockerfile`](Dockerfile) available for building Ops Agent to be run inside the ContainerOS instance. Build the container, push it to your Container/Artifact Repository and set the `ops_agent_image` to point to the image you built. + ## Variables diff --git a/blueprints/networking/shared-vpc-gke/main.tf b/blueprints/networking/shared-vpc-gke/main.tf index 9d141acc..59d07d2d 100644 --- a/blueprints/networking/shared-vpc-gke/main.tf +++ b/blueprints/networking/shared-vpc-gke/main.tf @@ -219,11 +219,13 @@ module "cluster-1" { } module "cluster-1-nodepool-1" { - source = "../../../modules/gke-nodepool" - count = var.cluster_create ? 1 : 0 - name = "nodepool-1" - project_id = module.project-svc-gke.project_id - location = module.cluster-1.0.location - cluster_name = module.cluster-1.0.name - service_account = {} + source = "../../../modules/gke-nodepool" + count = var.cluster_create ? 
1 : 0 + name = "nodepool-1" + project_id = module.project-svc-gke.project_id + location = module.cluster-1.0.location + cluster_name = module.cluster-1.0.name + service_account = { + create = true + } } diff --git a/blueprints/third-party-solutions/README.md b/blueprints/third-party-solutions/README.md index 10b7ced2..c7cbec73 100644 --- a/blueprints/third-party-solutions/README.md +++ b/blueprints/third-party-solutions/README.md @@ -7,3 +7,11 @@ The blueprints in this folder show how to automate installation of specific thir ### OpenShift cluster bootstrap on Shared VPC This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks. + +
+ +### Wordpress deployment on Cloud Run + + This [example](./wordpress/cloudrun/) shows how to deploy a functioning new Wordpress website exposed to the public internet via CloudRun and Cloud SQL, with minimal technical overhead. + +
diff --git a/blueprints/third-party-solutions/wordpress/cloudrun/README.md b/blueprints/third-party-solutions/wordpress/cloudrun/README.md index ee1e2d90..4ca10796 100644 --- a/blueprints/third-party-solutions/wordpress/cloudrun/README.md +++ b/blueprints/third-party-solutions/wordpress/cloudrun/README.md @@ -36,11 +36,11 @@ If `project_create` is left to null, the identity performing the deployment need If you want to deploy from your Cloud Shell, click on the image below, sign in if required and when the prompt appears, click on “confirm”. -[![Open Cloudshell](images/button.png)](https://shell.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fcloud-foundation-fabric&cloudshell_workspace=blueprints%2Fthird-party-solutions%2Fwordpress%2Fcloudrun) - +[![Open Cloudshell](../../../../assets/images/cloud-shell-button.png)](https://shell.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fcloud-foundation-fabric&cloudshell_workspace=blueprints%2Fthird-party-solutions%2Fwordpress%2Fcloudrun) Otherwise, in your console of choice: -``` {shell} + +```bash git clone https://github.com/GoogleCloudPlatform/cloud-foundation-fabric ``` @@ -70,6 +70,7 @@ Once you have the required information, head back to your cloned repository. Mak Configure the Terraform variables in your `terraform.tfvars` file. See [terraform.tfvars.sample](terraform.tfvars.sample) as starting point - just copy it to `terraform.tfvars` and edit the latter. See the variables documentation below. **Notes**: + 1. If you will want to change your admin password later on, please note that it will only work in the admin interface of Wordpress, but not with redeploying with Terraform, since Wordpress writes that password into the database upon installation and ignores the environment variables (that you can change with Terraform) after that. 2. If you have the [domain restriction org. 
policy](https://cloud.google.com/resource-manager/docs/organization-policy/restricting-domains) on your organization, you have to edit the `cloud_run_invoker` variable and give it a value that will be accepted in accordance to your policy. @@ -81,22 +82,27 @@ Initialize your Terraform environment and deploy the resources: terraform init terraform apply ``` + The resource creation will take a few minutes. **Note**: you might get the following error (or a similar one): + ``` {shell} │ Error: resource is in failed state "Ready:False", message: Revision '...' is not ready and cannot serve traffic.│ ``` + You might try to reapply at this point, the Cloud Run service just needs several minutes. ### Step 4: Use the created resources Upon completion, you will see the output with the values for the Cloud Run service and the user and password to access the `/admin` part of the website. You can also view it later with: + ``` {shell} terraform output # or for the concrete variable: terraform output cloud_run_service ``` + 1. Open your browser at the URL that you get with that last command, and you will see your Wordpress installation. 2. Add "/admin" in the end of the URL and log in to the admin interface, using the outputs "wp_user" and "wp_password". 
diff --git a/blueprints/third-party-solutions/wordpress/cloudrun/images/button.png b/blueprints/third-party-solutions/wordpress/cloudrun/images/button.png deleted file mode 100644 index 21a3f3de..00000000 Binary files a/blueprints/third-party-solutions/wordpress/cloudrun/images/button.png and /dev/null differ diff --git a/fast/stages/02-networking-nva/README.md b/fast/stages/02-networking-nva/README.md index 84c236cf..cddfddaa 100644 --- a/fast/stages/02-networking-nva/README.md +++ b/fast/stages/02-networking-nva/README.md @@ -172,6 +172,13 @@ DNS configuration is further centralized by leveraging peering zones, so that - the hub/landing Cloud DNS hosts configurations for on-prem forwarding, Google API domains, and the top-level private zone/s (e.g. gcp.example.com) - the spokes Cloud DNS host configurations for the environment-specific domains (e.g. prod.gcp.example.com), which are bound to the hub/landing leveraging [cross-project binding](https://cloud.google.com/dns/docs/zones/zones-overview#cross-project_binding); a peering zone for the `.` (root) zone is then created on each spoke, delegating all DNS resolution to hub/landing. 
+- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed to the VPN tunnels from on-premises, and the following names should be configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-nva/dns-landing.tf b/fast/stages/02-networking-nva/dns-landing.tf index e7834405..40090279 100644 --- a/fast/stages/02-networking-nva/dns-landing.tf +++ b/fast/stages/02-networking-nva/dns-landing.tf @@ -59,7 +59,7 @@ module "gcp-example-dns-private-zone" { } } -# Google API zone to trigger Private Access +# Google APIs module "googleapis-private-zone" { source = "../../../modules/dns" @@ -81,3 +81,75 @@ module "googleapis-private-zone" { "CNAME *" = { records = ["private.googleapis.com."] } } } + +module "gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "packages-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A packages.cloud.google.com." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} diff --git a/fast/stages/02-networking-peering/README.md b/fast/stages/02-networking-peering/README.md index 0e5c72a7..1dfdb9a5 100644 --- a/fast/stages/02-networking-peering/README.md +++ b/fast/stages/02-networking-peering/README.md @@ -102,6 +102,13 @@ DNS configuration is further centralized by leveraging peering zones, so that - the hub/landing Cloud DNS hosts configurations for on-prem forwarding, Google API domains, and the top-level private zone/s (e.g. gcp.example.com) - the spokes Cloud DNS host configurations for the environment-specific domains (e.g. 
prod.gcp.example.com), which are bound to the hub/landing leveraging [cross-project binding](https://cloud.google.com/dns/docs/zones/zones-overview#cross-project_binding); a peering zone for the `.` (root) zone is then created on each spoke, delegating all DNS resolution to hub/landing. +- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed on the VPN tunnels from on-prem, and the following names configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-peering/dns-landing.tf b/fast/stages/02-networking-peering/dns-landing.tf index e9a5da33..7b97a8cf 100644 --- a/fast/stages/02-networking-peering/dns-landing.tf +++ b/fast/stages/02-networking-peering/dns-landing.tf @@ -50,7 +50,7 @@ module "gcp-example-dns-private-zone" { } } -# Google API zone to trigger Private Access +# Google APIs module "googleapis-private-zone" { source = "../../../modules/dns" @@ -69,3 +69,63 @@ module "googleapis-private-zone" { "CNAME *" = { records = ["private.googleapis.com."] } } } + +module "gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "packages-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." 
+ client_networks = [module.landing-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} diff --git a/fast/stages/02-networking-separate-envs/README.md b/fast/stages/02-networking-separate-envs/README.md index 2329aad4..6fdb00cf 100644 --- a/fast/stages/02-networking-separate-envs/README.md +++ b/fast/stages/02-networking-separate-envs/README.md @@ -69,6 +69,13 @@ DNS often goes hand in hand with networking, especially on GCP where Cloud DNS z - on-prem to cloud via private zones for cloud-managed domains, and an [inbound policy](https://cloud.google.com/dns/docs/server-policies-overview#dns-server-policy-in) used as forwarding target or via delegation (requires some extra configuration) from on-prem DNS resolvers - cloud to on-prem via forwarding zones for the on-prem managed domains +- Private Google Access is enabled for a selection of the [supported 
domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed on the VPN tunnels from on-prem, and the following names configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-separate-envs/dns-dev.tf b/fast/stages/02-networking-separate-envs/dns-dev.tf index 5811c255..25adab5e 100644 --- a/fast/stages/02-networking-separate-envs/dns-dev.tf +++ b/fast/stages/02-networking-separate-envs/dns-dev.tf @@ -50,6 +50,8 @@ module "dev-reverse-10-dns-forwarding" { forwarders = { for ip in var.dns.dev : ip => null } } +# Google APIs + module "dev-googleapis-private-zone" { source = "../../../modules/dns" project_id = module.dev-spoke-project.project_id @@ -67,3 +69,63 @@ module "dev-googleapis-private-zone" { "CNAME *" = { records = ["private.googleapis.com."] } } } + +module "dev-gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "dev-packages-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "dev-pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "dev-pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} diff --git a/fast/stages/02-networking-separate-envs/dns-prod.tf b/fast/stages/02-networking-separate-envs/dns-prod.tf index db38064e..47c8cdca 100644 --- a/fast/stages/02-networking-separate-envs/dns-prod.tf +++ b/fast/stages/02-networking-separate-envs/dns-prod.tf @@ -50,6 +50,7 @@ module "prod-reverse-10-dns-forwarding" { forwarders = { for ip in var.dns.prod : ip => null } } +# Google APIs module "prod-googleapis-private-zone" { source = "../../../modules/dns" @@ -68,3 +69,63 @@ module "prod-googleapis-private-zone" { "CNAME *" = { records = ["private.googleapis.com."] } } } + +module "prod-gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A gcr.io." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "prod-packages-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "prod-pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "prod-pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A pki.goog." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} diff --git a/fast/stages/02-networking-vpn/README.md b/fast/stages/02-networking-vpn/README.md index 010b2246..783b11fb 100644 --- a/fast/stages/02-networking-vpn/README.md +++ b/fast/stages/02-networking-vpn/README.md @@ -108,6 +108,13 @@ DNS configuration is further centralized by leveraging peering zones, so that - the hub/landing Cloud DNS hosts configurations for on-prem forwarding, Google API domains, and the top-level private zone/s (e.g. gcp.example.com) - the spokes Cloud DNS host configurations for the environment-specific domains (e.g. prod.gcp.example.com), which are bound to the hub/landing leveraging [cross-project binding](https://cloud.google.com/dns/docs/zones/zones-overview#cross-project_binding); a peering zone for the `.` (root) zone is then created on each spoke, delegating all DNS resolution to hub/landing. 
+- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed on the VPN tunnels from on-prem, and the following names configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-vpn/dns-landing.tf b/fast/stages/02-networking-vpn/dns-landing.tf index e9a5da33..7b97a8cf 100644 --- a/fast/stages/02-networking-vpn/dns-landing.tf +++ b/fast/stages/02-networking-vpn/dns-landing.tf @@ -50,7 +50,7 @@ module "gcp-example-dns-private-zone" { } } -# Google API zone to trigger Private Access +# Google APIs module "googleapis-private-zone" { source = "../../../modules/dns" @@ -69,3 +69,63 @@ module "googleapis-private-zone" { "CNAME *" = { records = ["private.googleapis.com."] } } } + +module "gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "packages-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} diff --git a/fast/stages/03-gke-multitenant/dev/README.md b/fast/stages/03-gke-multitenant/dev/README.md index ac4e03d3..f3abf494 100644 --- a/fast/stages/03-gke-multitenant/dev/README.md +++ b/fast/stages/03-gke-multitenant/dev/README.md @@ -142,21 +142,21 @@ terraform apply |---|---|:---:|:---:|:---:|:---:| | [automation](variables.tf#L21) | Automation resources created by the bootstrap stage. | object({…}) | ✓ | | 00-bootstrap | | [billing_account](variables.tf#L29) | Billing account id and organization id ('nnnnnnnn' or null). | object({…}) | ✓ | | 00-bootstrap | -| [folder_ids](variables.tf#L146) | Folders to be used for the networking resources in folders/nnnnnnnnnnn format. If null, folder will be created. | object({…}) | ✓ | | 01-resman | -| [host_project_ids](variables.tf#L168) | Host project for the shared VPC. | object({…}) | ✓ | | 02-networking | -| [prefix](variables.tf#L210) | Prefix used for resources that need unique names. 
| string | ✓ | | | -| [vpc_self_links](variables.tf#L222) | Self link for the shared VPC. | object({…}) | ✓ | | 02-networking | -| [clusters](variables.tf#L38) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | | -| [fleet_configmanagement_clusters](variables.tf#L83) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | | -| [fleet_configmanagement_templates](variables.tf#L91) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | | -| [fleet_features](variables.tf#L126) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | | -| [fleet_workload_identity](variables.tf#L139) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | | -| [group_iam](variables.tf#L154) | Project-level authoritative IAM bindings for groups in {GROUP_EMAIL => [ROLES]} format. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | | -| [iam](variables.tf#L161) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | -| [labels](variables.tf#L176) | Project-level labels. | map(string) | | {} | | -| [nodepools](variables.tf#L182) | Nodepools configuration. Refer to the gke-nodepool module for type details. | map(map(object({…}))) | | {} | | -| [outputs_location](variables.tf#L204) | Path where providers, tfvars files, and lists for the following stages are written. Leave empty to disable. | string | | null | | -| [project_services](variables.tf#L215) | Additional project services to enable. | list(string) | | [] | | +| [folder_ids](variables.tf#L149) | Folders to be used for the networking resources in folders/nnnnnnnnnnn format. 
If null, folder will be created. | object({…}) | ✓ | | 01-resman | +| [host_project_ids](variables.tf#L171) | Host project for the shared VPC. | object({…}) | ✓ | | 02-networking | +| [prefix](variables.tf#L213) | Prefix used for resources that need unique names. | string | ✓ | | | +| [vpc_self_links](variables.tf#L225) | Self link for the shared VPC. | object({…}) | ✓ | | 02-networking | +| [clusters](variables.tf#L38) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | | +| [fleet_configmanagement_clusters](variables.tf#L86) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | | +| [fleet_configmanagement_templates](variables.tf#L94) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | | +| [fleet_features](variables.tf#L129) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | | +| [fleet_workload_identity](variables.tf#L142) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | | +| [group_iam](variables.tf#L157) | Project-level authoritative IAM bindings for groups in {GROUP_EMAIL => [ROLES]} format. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | | +| [iam](variables.tf#L164) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | +| [labels](variables.tf#L179) | Project-level labels. | map(string) | | {} | | +| [nodepools](variables.tf#L185) | Nodepools configuration. Refer to the gke-nodepool module for type details. | map(map(object({…}))) | | {} | | +| [outputs_location](variables.tf#L207) | Path where providers, tfvars files, and lists for the following stages are written. 
Leave empty to disable. | string | | null | | +| [project_services](variables.tf#L218) | Additional project services to enable. | list(string) | | [] | | ## Outputs diff --git a/fast/stages/03-gke-multitenant/dev/variables.tf b/fast/stages/03-gke-multitenant/dev/variables.tf index 1a17da4b..9c5a1d38 100644 --- a/fast/stages/03-gke-multitenant/dev/variables.tf +++ b/fast/stages/03-gke-multitenant/dev/variables.tf @@ -55,9 +55,12 @@ variable "clusters" { recurring_window = null maintenance_exclusion = [] }) - max_pods_per_node = optional(number, 110) - min_master_version = optional(string) - monitoring_config = optional(list(string), ["SYSTEM_COMPONENTS"]) + max_pods_per_node = optional(number, 110) + min_master_version = optional(string) + monitoring_config = optional(object({ + enable_components = optional(list(string), ["SYSTEM_COMPONENTS"]) + managed_prometheus = optional(bool) + })) node_locations = optional(list(string)) private_cluster_config = optional(any) release_channel = optional(string) diff --git a/modules/README.md b/modules/README.md index 92cf25fc..129a8b8f 100644 --- a/modules/README.md +++ b/modules/README.md @@ -13,11 +13,12 @@ These modules are not necessarily backward compatible. Changes breaking compatib These modules are used in the examples included in this repository. If you are using any of those examples in your own Terraform configuration, make sure that you are using the same version for all the modules, and switch module sources to GitHub format using references. The recommended approach to working with Fabric modules is the following: - Fork the repository and own the fork. This will allow you to: - - Evolve the existing modules. - - Create your own modules. - - Sync from the upstream repository to get all the updates. - + - Evolve the existing modules. + - Create your own modules. + - Sync from the upstream repository to get all the updates. + - Use GitHub sources with refs to reference the modules. 
See an example below: + ```terraform module "project" { source = "github.com/GoogleCloudPlatform/cloud-foundation-fabric//modules/project?ref=v13.0.0" @@ -30,62 +31,65 @@ These modules are used in the examples included in this repository. If you are u ## Foundational modules - [billing budget](./billing-budget) +- [Cloud Identity group](./cloud-identity-group/) - [folder](./folder) +- [service accounts](./iam-service-account) - [logging bucket](./logging-bucket) - [organization](./organization) +- [organization-policy](./organization-policy) - [project](./project) - [projects-data-source](./projects-data-source) -- [service account](./iam-service-account) -- [organization policy](./organization-policy) ## Networking modules -- [address reservation](./net-address) -- [Cloud DNS](./dns) -- [Cloud NAT](./net-cloudnat) +- [DNS](./dns) - [Cloud Endpoints](./endpoints) -- [L4 Internal Load Balancer](./net-ilb) -- [Service Directory](./service-directory) +- [address reservation](./net-address) +- [NAT](./net-cloudnat) +- [Global Load Balancer (classic)](./net-glb/) +- [L4 ILB](./net-ilb) +- [L7 ILB](./net-ilb-l7) - [VPC](./net-vpc) - [VPC firewall](./net-vpc-firewall) - [VPC peering](./net-vpc-peering) -- [VPN static](./net-vpn-static) - [VPN dynamic](./net-vpn-dynamic) - [HA VPN](./net-vpn-ha) -- [ ] TODO: xLB modules +- [VPN static](./net-vpn-static) +- [Service Directory](./service-directory) ## Compute/Container -- [COS container](./cloud-config-container/onprem/) (coredns, mysql, onprem, squid) -- [GKE cluster](./gke-cluster) -- [GKE nodepool](./gke-nodepool) -- [GKE hub](./gke-hub) -- [Managed Instance Group](./compute-mig) - [VM/VM group](./compute-vm) +- [MIG](./compute-mig) +- [COS container](./cloud-config-container/cos-generic-metadata/) (coredns/mysql/nva/onprem/squid) +- [GKE cluster](./gke-cluster) +- [GKE hub](./gke-hub) +- [GKE nodepool](./gke-nodepool) ## Data - [BigQuery dataset](./bigquery-dataset) -- [Datafusion](./datafusion) -- [GCS](./gcs) -- 
[Pub/Sub](./pubsub) - [Bigtable instance](./bigtable-instance) - [Cloud SQL instance](./cloudsql-instance) - [Data Catalog Policy Tag](./data-catalog-policy-tag) +- [Datafusion](./datafusion) +- [GCS](./gcs) +- [Pub/Sub](./pubsub) ## Development -- [Artifact Registry](./artifact-registry) -- [Container Registry](./container-registry) -- [Source Repository](./source-repository) +- [API Gateway](./api-gateway) - [Apigee Organization](./apigee-organization) - [Apigee X Instance](./apigee-x-instance) -- [API Gateway](./api-gateway) +- [Artifact Registry](./artifact-registry) +- [Container Registry](./container-registry) +- [Cloud Source Repository](./source-repository) ## Security -- [Cloud KMS](./kms) -- [Secret Manager](./secret-manager) +- [Binauthz](./binauthz/) +- [KMS](./kms) +- [SecretManager](./secret-manager) - [VPC Service Control](./vpc-sc) ## Serverless diff --git a/modules/apigee-organization/README.md b/modules/apigee-organization/README.md index eceb4d13..150553a1 100644 --- a/modules/apigee-organization/README.md +++ b/modules/apigee-organization/README.md @@ -13,10 +13,16 @@ module "apigee-organization" { analytics_region = "us-central1" runtime_type = "CLOUD" authorized_network = "my-vpc" - apigee_environments = [ - "eval1", - "eval2" - ] + apigee_environments = { + eval1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + eval2 = { + api_proxy_type = "CONFIGURABLE" + deployment_type = "ARCHIVE" + } + } apigee_envgroups = { eval = { environments = [ @@ -42,12 +48,18 @@ module "apigee-organization" { runtime_type = "CLOUD" authorized_network = "my-vpc" database_encryption_key = "my-data-key" - apigee_environments = [ - "dev1", - "dev2", - "test1", - "test2" - ] + apigee_environments = { + dev1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + dev2 = { + api_proxy_type = "CONFIGURABLE" + deployment_type = "ARCHIVE" + } + test1 = {} + test2 = {} + } apigee_envgroups = { dev = { environments = [ @@ -80,10 +92,13 
@@ module "apigee-organization" { project_id = "my-project" analytics_region = "us-central1" runtime_type = "HYBRID" - apigee_environments = [ - "eval1", - "eval2" - ] + apigee_environments = { + eval1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + eval2 = {} + } apigee_envgroups = { eval = { environments = [ @@ -105,15 +120,15 @@ module "apigee-organization" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [analytics_region](variables.tf#L17) | Analytics Region for the Apigee Organization (immutable). See https://cloud.google.com/apigee/docs/api-platform/get-started/install-cli. | string | ✓ | | -| [project_id](variables.tf#L61) | Project ID to host this Apigee organization (will also become the Apigee Org name). | string | ✓ | | -| [runtime_type](variables.tf#L66) | Apigee runtime type. Must be `CLOUD` or `HYBRID`. | string | ✓ | | +| [project_id](variables.tf#L72) | Project ID to host this Apigee organization (will also become the Apigee Org name). | string | ✓ | | +| [runtime_type](variables.tf#L77) | Apigee runtime type. Must be `CLOUD` or `HYBRID`. | string | ✓ | | | [apigee_envgroups](variables.tf#L22) | Apigee Environment Groups. | map(object({…})) | | {} | -| [apigee_environments](variables.tf#L31) | Apigee Environment Names. | list(string) | | [] | -| [authorized_network](variables.tf#L37) | VPC network self link (requires service network peering enabled (Used in Apigee X only). | string | | null | -| [billing_type](variables.tf#L75) | Billing type of the Apigee organization. | string | | null | -| [database_encryption_key](variables.tf#L43) | Cloud KMS key self link (e.g. `projects/foo/locations/us/keyRings/bar/cryptoKeys/baz`) used for encrypting the data that is stored and replicated across runtime instances (immutable, used in Apigee X only). | string | | null | -| [description](variables.tf#L49) | Description of the Apigee Organization. 
| string | | "Apigee Organization created by tf module" | -| [display_name](variables.tf#L55) | Display Name of the Apigee Organization. | string | | null | +| [apigee_environments](variables.tf#L31) | Apigee Environment Names. | map(object({…})) | | {} | +| [authorized_network](variables.tf#L48) | VPC network self link (requires service network peering enabled (Used in Apigee X only). | string | | null | +| [billing_type](variables.tf#L86) | Billing type of the Apigee organization. | string | | null | +| [database_encryption_key](variables.tf#L54) | Cloud KMS key self link (e.g. `projects/foo/locations/us/keyRings/bar/cryptoKeys/baz`) used for encrypting the data that is stored and replicated across runtime instances (immutable, used in Apigee X only). | string | | null | +| [description](variables.tf#L60) | Description of the Apigee Organization. | string | | "Apigee Organization created by tf module" | +| [display_name](variables.tf#L66) | Display Name of the Apigee Organization. | string | | null | ## Outputs diff --git a/modules/apigee-organization/main.tf b/modules/apigee-organization/main.tf index 148711a9..a498135b 100644 --- a/modules/apigee-organization/main.tf +++ b/modules/apigee-organization/main.tf @@ -15,6 +15,14 @@ */ locals { + env_pairs = flatten([ + for env_name, env in var.apigee_environments : { + api_proxy_type = env.api_proxy_type + deployment_type = env.deployment_type + env_name = env_name + } + ]) + env_envgroup_pairs = flatten([ for eg_name, eg in var.apigee_envgroups : [ for e in eg.environments : { @@ -37,9 +45,11 @@ resource "google_apigee_organization" "apigee_org" { } resource "google_apigee_environment" "apigee_env" { - for_each = toset(var.apigee_environments) - org_id = google_apigee_organization.apigee_org.id - name = each.key + for_each = { for env in local.env_pairs : env.env_name => env } + api_proxy_type = each.value.api_proxy_type + deployment_type = each.value.deployment_type + name = each.key + org_id = 
google_apigee_organization.apigee_org.id } resource "google_apigee_envgroup" "apigee_envgroup" { diff --git a/modules/apigee-organization/variables.tf b/modules/apigee-organization/variables.tf index b2b3eac9..b3d13e15 100644 --- a/modules/apigee-organization/variables.tf +++ b/modules/apigee-organization/variables.tf @@ -30,8 +30,19 @@ variable "apigee_envgroups" { variable "apigee_environments" { description = "Apigee Environment Names." - type = list(string) - default = [] + type = map(object({ + api_proxy_type = optional(string, "API_PROXY_TYPE_UNSPECIFIED") + deployment_type = optional(string, "DEPLOYMENT_TYPE_UNSPECIFIED") + })) + default = {} + validation { + condition = alltrue([for k, v in var.apigee_environments : contains(["API_PROXY_TYPE_UNSPECIFIED", "PROGRAMMABLE", "CONFIGURABLE"], v.api_proxy_type)]) + error_message = "Allowed values for api_proxy_type \"API_PROXY_TYPE_UNSPECIFIED\", \"PROGRAMMABLE\" or \"CONFIGURABLE\"." + } + validation { + condition = alltrue([for k, v in var.apigee_environments : contains(["DEPLOYMENT_TYPE_UNSPECIFIED", "PROXY", "ARCHIVE"], v.deployment_type)]) + error_message = "Allowed values for deployment_type \"DEPLOYMENT_TYPE_UNSPECIFIED\", \"PROXY\" or \"ARCHIVE\"." 
+ } } variable "authorized_network" { diff --git a/modules/cloud-config-container/simple-nva/README.md b/modules/cloud-config-container/simple-nva/README.md index 5014e9a3..3f5b0553 100644 --- a/modules/cloud-config-container/simple-nva/README.md +++ b/modules/cloud-config-container/simple-nva/README.md @@ -35,6 +35,13 @@ module "nva-cloud-config" { source = "../../../cloud-foundation-fabric/modules/cloud-config-container/simple-nva" enable_health_checks = true network_interfaces = local.network_interfaces + files = { + "/var/lib/cloud/scripts/per-boot/firewall-rules.sh" = { + content = file("./your_path/to/firewall-rules.sh") + owner = "root" + permissions = "0700" + } + } } # COS VM @@ -63,9 +70,10 @@ module "nva" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [network_interfaces](variables.tf#L29) | Network interfaces configuration. | list(object({…})) | ✓ | | +| [network_interfaces](variables.tf#L39) | Network interfaces configuration. | list(object({…})) | ✓ | | | [cloud_config](variables.tf#L17) | Cloud config template path. If null default will be used. | string | | null | -| [enable_health_checks](variables.tf#L23) | Configures routing to enable responses to health check probes. | bool | | false | +| [enable_health_checks](variables.tf#L33) | Configures routing to enable responses to health check probes. | bool | | false | +| [files](variables.tf#L23) | Map of extra files to create on the instance, path as key. Owner and permissions will use defaults if null. | map(object({…})) | | {} | | [test_instance](variables-instance.tf#L17) | Test/development instance attributes, leave null to skip creation. | object({…}) | | null | | [test_instance_defaults](variables-instance.tf#L30) | Test/development instance defaults used for optional configuration. If image is null, COS stable will be used.
| object({…}) | | {…} | diff --git a/modules/cloud-config-container/simple-nva/cloud-config.yaml b/modules/cloud-config-container/simple-nva/cloud-config.yaml index 8d18a356..f1d71e82 100644 --- a/modules/cloud-config-container/simple-nva/cloud-config.yaml +++ b/modules/cloud-config-container/simple-nva/cloud-config.yaml @@ -22,17 +22,37 @@ write_files: content: | ${indent(6, data.content)} %{ endfor } + - path: /etc/systemd/system/routing.service + permissions: 0644 + owner: root + content: | + [Install] + WantedBy=multi-user.target + [Unit] + Description=Start routing + After=network-online.target + Wants=network-online.target + [Service] + ExecStart=/bin/sh -c "/var/run/nva/start-routing.sh" + - path: /var/run/nva/start-routing.sh + permissions: 0744 + owner: root + content: | + iptables --policy FORWARD ACCEPT +%{ for interface in network_interfaces ~} +%{ if enable_health_checks ~} + /var/run/nva/policy_based_routing.sh ${interface.name} +%{ endif ~} +%{ for route in interface.routes ~} + ip route add ${route} via `curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/${interface.number}/gateway -H "Metadata-Flavor:Google"` dev ${interface.name} +%{ endfor ~} +%{ endfor ~} bootcmd: - systemctl start node-problem-detector runcmd: - - iptables --policy FORWARD ACCEPT -%{ for interface in network_interfaces ~} -%{ if enable_health_checks ~} - - /var/run/nva/policy_based_routing.sh ${interface.name} -%{ endif ~} -%{ for route in interface.routes ~} - - ip route add ${route} via `curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/${interface.number}/gateway -H "Metadata-Flavor:Google"` dev ${interface.name} -%{ endfor ~} -%{ endfor ~} + - systemctl daemon-reload + - systemctl enable routing + - systemctl start routing + diff --git a/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh b/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh index 42ed0dcb..2e1eb152 
100644 --- a/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh +++ b/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh @@ -15,13 +15,18 @@ # limitations under the License. IF_NAME=$1 -IF_NUMBER=$(echo $1 | sed -e s/eth//) -IF_GW=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/gateway -H "Metadata-Flavor: Google") -IF_IP=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/ip -H "Metadata-Flavor: Google") -IF_NETMASK=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/subnetmask -H "Metadata-Flavor: Google") -IF_IP_PREFIX=$(/var/run/nva/ipprefix_by_netmask.sh $IF_NETMASK) IP_LB=$(ip r show table local | grep "$IF_NAME proto 66" | cut -f 2 -d " ") -grep -qxF "$((200 + $IF_NUMBER)) hc-$IF_NAME" /etc/iproute2/rt_tables || echo "$((200 + $IF_NUMBER)) hc-$IF_NAME" >>/etc/iproute2/rt_tables -ip route add $IF_GW src $IF_IP dev $IF_NAME table hc-$IF_NAME -ip route add default via $IF_GW dev $IF_NAME table hc-$IF_NAME -ip rule add from $IP_LB/32 table hc-$IF_NAME + +# If there's a load balancer for this IF... +if [ ! 
-z $IP_LB ] +then + IF_NUMBER=$(echo $IF_NAME | sed -e s/eth//) + IF_GW=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/gateway -H "Metadata-Flavor: Google") + IF_IP=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/ip -H "Metadata-Flavor: Google") + IF_NETMASK=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/subnetmask -H "Metadata-Flavor: Google") + IF_IP_PREFIX=$(/var/run/nva/ipprefix_by_netmask.sh $IF_NETMASK) + grep -qxF "$((200 + $IF_NUMBER)) hc-$IF_NAME" /etc/iproute2/rt_tables || echo "$((200 + $IF_NUMBER)) hc-$IF_NAME" >>/etc/iproute2/rt_tables + ip route add $IF_GW src $IF_IP dev $IF_NAME table hc-$IF_NAME + ip route add default via $IF_GW dev $IF_NAME table hc-$IF_NAME + ip rule add from $IP_LB/32 table hc-$IF_NAME +fi diff --git a/modules/cloud-config-container/simple-nva/main.tf b/modules/cloud-config-container/simple-nva/main.tf index 5b9663bd..4ff0afe2 100644 --- a/modules/cloud-config-container/simple-nva/main.tf +++ b/modules/cloud-config-container/simple-nva/main.tf @@ -21,7 +21,7 @@ locals { network_interfaces = local.network_interfaces })) - files = { + files = merge({ "/var/run/nva/ipprefix_by_netmask.sh" = { content = file("${path.module}/files/ipprefix_by_netmask.sh") owner = "root" @@ -32,7 +32,13 @@ locals { owner = "root" permissions = "0744" } - } + }, { + for path, attrs in var.files : path => { + content = attrs.content, + owner = attrs.owner, + permissions = attrs.permissions + } + }) network_interfaces = [ for index, interface in var.network_interfaces : { diff --git a/modules/cloud-config-container/simple-nva/variables.tf b/modules/cloud-config-container/simple-nva/variables.tf index 9307ddac..3c2ebfcb 100644 --- a/modules/cloud-config-container/simple-nva/variables.tf +++ b/modules/cloud-config-container/simple-nva/variables.tf @@ -20,6 +20,16 @@ variable "cloud_config" { default = null } 
+variable "files" { + description = "Map of extra files to create on the instance, path as key. Owner and permissions will use defaults if null." + type = map(object({ + content = string + owner = string + permissions = string + })) + default = {} +} + variable "enable_health_checks" { description = "Configures routing to enable responses to health check probes." type = bool diff --git a/modules/dns/README.md b/modules/dns/README.md index ebd200ab..62b38efc 100644 --- a/modules/dns/README.md +++ b/modules/dns/README.md @@ -53,25 +53,58 @@ module "private-dns" { } # tftest modules=1 resources=1 ``` + +### Routing Policies + +```hcl +module "private-dns" { + source = "./fabric/modules/dns" + project_id = "myproject" + type = "private" + name = "test-example" + domain = "test.example." + client_networks = [var.vpc.self_link] + recordsets = { + "A regular" = { records = ["10.20.0.1"] } + "A geo" = { + geo_routing = [ + { location = "europe-west1", records = ["10.0.0.1"] }, + { location = "europe-west2", records = ["10.0.0.2"] }, + { location = "europe-west3", records = ["10.0.0.3"] } + ] + } + + "A wrr" = { + ttl = 600 + wrr_routing = [ + { weight = 0.6, records = ["10.10.0.1"] }, + { weight = 0.2, records = ["10.10.0.2"] }, + { weight = 0.2, records = ["10.10.0.3"] } + ] + } + } +} +# tftest modules=1 resources=4 +``` ## Variables | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [domain](variables.tf#L51) | Zone domain, must end with a period. | string | ✓ | | -| [name](variables.tf#L69) | Zone name, must be unique within the project. | string | ✓ | | -| [project_id](variables.tf#L80) | Project id for the zone. | string | ✓ | | +| [domain](variables.tf#L54) | Zone domain, must end with a period. | string | ✓ | | +| [name](variables.tf#L72) | Zone name, must be unique within the project. | string | ✓ | | +| [project_id](variables.tf#L83) | Project id for the zone. 
| string | ✓ | | | [client_networks](variables.tf#L21) | List of VPC self links that can see this zone. | list(string) | | [] | | [description](variables.tf#L28) | Domain description. | string | | "Terraform managed." | -| [dnssec_config](variables.tf#L34) | DNSSEC configuration for this zone. | object({…}) | | null | -| [enable_logging](variables.tf#L62) | Enable query logging for this zone. Only valid for public zones. | bool | | false | -| [forwarders](variables.tf#L56) | Map of {IPV4_ADDRESS => FORWARDING_PATH} for 'forwarding' zone types. Path can be 'default', 'private', or null for provider default. | map(string) | | {} | -| [peer_network](variables.tf#L74) | Peering network self link, only valid for 'peering' zone types. | string | | null | -| [recordsets](variables.tf#L85) | Map of DNS recordsets in \"type name\" => {ttl, [records]} format. | map(object({…})) | | {} | -| [service_directory_namespace](variables.tf#L102) | Service directory namespace id (URL), only valid for 'service-directory' zone types. | string | | null | -| [type](variables.tf#L108) | Type of zone to create, valid values are 'public', 'private', 'forwarding', 'peering', 'service-directory'. | string | | "private" | -| [zone_create](variables.tf#L118) | Create zone. When set to false, uses a data source to reference existing zone. | bool | | true | +| [dnssec_config](variables.tf#L34) | DNSSEC configuration for this zone. | object({…}) | | {…} | +| [enable_logging](variables.tf#L65) | Enable query logging for this zone. Only valid for public zones. | bool | | false | +| [forwarders](variables.tf#L59) | Map of {IPV4_ADDRESS => FORWARDING_PATH} for 'forwarding' zone types. Path can be 'default', 'private', or null for provider default. | map(string) | | {} | +| [peer_network](variables.tf#L77) | Peering network self link, only valid for 'peering' zone types. | string | | null | +| [recordsets](variables.tf#L88) | Map of DNS recordsets in \"type name\" => {ttl, [records]} format. 
| map(object({…})) | | {} | +| [service_directory_namespace](variables.tf#L123) | Service directory namespace id (URL), only valid for 'service-directory' zone types. | string | | null | +| [type](variables.tf#L129) | Type of zone to create, valid values are 'public', 'private', 'forwarding', 'peering', 'service-directory'. | string | | "private" | +| [zone_create](variables.tf#L139) | Create zone. When set to false, uses a data source to reference existing zone. | bool | | true | ## Outputs diff --git a/modules/dns/main.tf b/modules/dns/main.tf index ed687d97..c1687761 100644 --- a/modules/dns/main.tf +++ b/modules/dns/main.tf @@ -15,10 +15,42 @@ */ locals { - recordsets = { + # split record name and type and set as keys in a map + _recordsets_0 = { for key, attrs in var.recordsets : key => merge(attrs, zipmap(["type", "name"], split(" ", key))) } + # compute the final resource name for the recordset + _recordsets = { + for key, attrs in local._recordsets_0 : + key => merge(attrs, { + resource_name = ( + attrs.name == "" + ? var.domain + : ( + substr(attrs.name, -1, 1) == "." + ? attrs.name + : "${attrs.name}.${var.domain}" + ) + ) + }) + } + # split recordsets between regular, geo and wrr + geo_recordsets = { + for k, v in local._recordsets : + k => v + if v.geo_routing != null + } + regular_recordsets = { + for k, v in local._recordsets : + k => v + if v.records != null + } + wrr_recordsets = { + for k, v in local._recordsets : + k => v + if v.wrr_routing != null + } zone = ( var.zone_create ? try( @@ -149,23 +181,72 @@ data "google_dns_keys" "dns_keys" { resource "google_dns_record_set" "cloud-static-records" { for_each = ( var.type == "public" || var.type == "private" - ? local.recordsets + ? local.regular_recordsets : {} ) project = var.project_id managed_zone = var.name - name = ( - each.value.name == "" - ? var.domain - : ( - substr(each.value.name, -1, 1) == "." - ? 
each.value.name - : "${each.value.name}.${var.domain}" - ) - ) - type = each.value.type - ttl = each.value.ttl - rrdatas = each.value.records + name = each.value.resource_name + type = each.value.type + ttl = each.value.ttl + rrdatas = each.value.records + + depends_on = [ + google_dns_managed_zone.non-public, google_dns_managed_zone.public + ] +} + +resource "google_dns_record_set" "cloud-geo-records" { + for_each = ( + var.type == "public" || var.type == "private" + ? local.geo_recordsets + : {} + ) + project = var.project_id + managed_zone = var.name + name = each.value.resource_name + type = each.value.type + ttl = each.value.ttl + + routing_policy { + dynamic "geo" { + for_each = each.value.geo_routing + iterator = policy + content { + location = policy.value.location + rrdatas = policy.value.records + } + } + } + + depends_on = [ + google_dns_managed_zone.non-public, google_dns_managed_zone.public + ] +} + +resource "google_dns_record_set" "cloud-wrr-records" { + for_each = ( + var.type == "public" || var.type == "private" + ? local.wrr_recordsets + : {} + ) + project = var.project_id + managed_zone = var.name + name = each.value.resource_name + type = each.value.type + ttl = each.value.ttl + + routing_policy { + dynamic "wrr" { + for_each = each.value.wrr_routing + iterator = policy + content { + weight = policy.value.weight + rrdatas = policy.value.records + } + } + } + depends_on = [ google_dns_managed_zone.non-public, google_dns_managed_zone.public ] diff --git a/modules/dns/variables.tf b/modules/dns/variables.tf index 749bbdd5..aafe6a1d 100644 --- a/modules/dns/variables.tf +++ b/modules/dns/variables.tf @@ -45,7 +45,10 @@ variable "dnssec_config" { { algorithm = "rsasha256", key_length = 1024 } ) }) - default = null + default = { + state = "off" + } + nullable = false } variable "domain" { @@ -86,17 +89,35 @@ variable "recordsets" { description = "Map of DNS recordsets in \"type name\" => {ttl, [records]} format." 
type = map(object({ ttl = optional(number, 300) - records = list(string) + records = optional(list(string)) + geo_routing = optional(list(object({ + location = string + records = list(string) + }))) + wrr_routing = optional(list(object({ + weight = number + records = list(string) + }))) })) default = {} nullable = false validation { condition = alltrue([ - for k, v in var.recordsets == null ? {} : var.recordsets : + for k, v in coalesce(var.recordsets, {}) : length(split(" ", k)) == 2 ]) error_message = "Recordsets must have keys in the format \"type name\"." } + validation { + condition = alltrue([ + for k, v in coalesce(var.recordsets, {}) : ( + (v.records != null && v.wrr_routing == null && v.geo_routing == null) || + (v.records == null && v.wrr_routing != null && v.geo_routing == null) || + (v.records == null && v.wrr_routing == null && v.geo_routing != null) + ) + ]) + error_message = "Only one of records, wrr_routing or geo_routing can be defined for each recordset." + } } variable "service_directory_namespace" { diff --git a/modules/gke-cluster/README.md b/modules/gke-cluster/README.md index be0a9f62..55b594c6 100644 --- a/modules/gke-cluster/README.md +++ b/modules/gke-cluster/README.md @@ -77,9 +77,9 @@ module "cluster-1" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [location](variables.tf#L117) | Cluster zone or region. | string | ✓ | | -| [name](variables.tf#L169) | Cluster name. | string | ✓ | | -| [project_id](variables.tf#L195) | Cluster project id. | string | ✓ | | -| [vpc_config](variables.tf#L206) | VPC-level configuration. | object({…}) | ✓ | | +| [name](variables.tf#L174) | Cluster name. | string | ✓ | | +| [project_id](variables.tf#L200) | Cluster project id. | string | ✓ | | +| [vpc_config](variables.tf#L211) | VPC-level configuration. | object({…}) | ✓ | | | [cluster_autoscaling](variables.tf#L17) | Enable and configure limits for Node Auto-Provisioning with Cluster Autoscaler. 
| object({…}) | | null | | [description](variables.tf#L38) | Cluster description. | string | | null | | [enable_addons](variables.tf#L44) | Addons enabled in the cluster (true means enabled). | object({…}) | | {…} | @@ -90,10 +90,10 @@ module "cluster-1" { | [maintenance_config](variables.tf#L128) | Maintenance window configuration. | object({…}) | | {…} | | [max_pods_per_node](variables.tf#L151) | Maximum number of pods per node in this cluster. | number | | 110 | | [min_master_version](variables.tf#L157) | Minimum version of the master, defaults to the version of the most recent official release. | string | | null | -| [monitoring_config](variables.tf#L163) | Monitoring components. | list(string) | | ["SYSTEM_COMPONENTS"] | -| [node_locations](variables.tf#L174) | Zones in which the cluster's nodes are located. | list(string) | | [] | -| [private_cluster_config](variables.tf#L181) | Private cluster configuration. | object({…}) | | null | -| [release_channel](variables.tf#L200) | Release channel for GKE upgrades. | string | | null | +| [monitoring_config](variables.tf#L163) | Monitoring components. | object({…}) | | {…} | +| [node_locations](variables.tf#L179) | Zones in which the cluster's nodes are located. | list(string) | | [] | +| [private_cluster_config](variables.tf#L186) | Private cluster configuration. | object({…}) | | null | +| [release_channel](variables.tf#L205) | Release channel for GKE upgrades. | string | | null | ## Outputs diff --git a/modules/gke-cluster/main.tf b/modules/gke-cluster/main.tf index 9981d9b4..bc94dd37 100644 --- a/modules/gke-cluster/main.tf +++ b/modules/gke-cluster/main.tf @@ -41,7 +41,7 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 remove_default_node_pool = var.enable_features.autopilot ? null : true datapath_provider = ( - var.enable_features.dataplane_v2 + var.enable_features.dataplane_v2 || var.enable_features.autopilot ? 
"ADVANCED_DATAPATH" : "DATAPATH_PROVIDER_UNSPECIFIED" ) @@ -240,7 +240,15 @@ resource "google_container_cluster" "cluster" { dynamic "monitoring_config" { for_each = var.monitoring_config != null && !var.enable_features.autopilot ? [""] : [] content { - enable_components = var.monitoring_config + enable_components = var.monitoring_config.enable_components + dynamic "managed_prometheus" { + for_each = ( + try(var.monitoring_config.managed_prometheus, null) == true ? [""] : [] + ) + content { + enabled = true + } + } } } diff --git a/modules/gke-cluster/variables.tf b/modules/gke-cluster/variables.tf index a227d5c7..f9a3b69e 100644 --- a/modules/gke-cluster/variables.tf +++ b/modules/gke-cluster/variables.tf @@ -162,8 +162,13 @@ variable "min_master_version" { variable "monitoring_config" { description = "Monitoring components." - type = list(string) - default = ["SYSTEM_COMPONENTS"] + type = object({ + enable_components = optional(list(string)) + managed_prometheus = optional(bool) + }) + default = { + enable_components = ["SYSTEM_COMPONENTS"] + } } variable "name" { diff --git a/modules/gke-hub/README.md b/modules/gke-hub/README.md index 2573ac9d..1a3c547c 100644 --- a/modules/gke-hub/README.md +++ b/modules/gke-hub/README.md @@ -257,7 +257,7 @@ module "cluster_1_nodepool" { location = "europe-west1" name = "nodepool" node_count = { initial = 1 } - service_account = {} + service_account = { create = true } tags = ["cluster-1-node"] } @@ -292,7 +292,7 @@ module "cluster_2_nodepool" { location = "europe-west4" name = "nodepool" node_count = { initial = 1 } - service_account = {} + service_account = { create = true } tags = ["cluster-2-node"] } diff --git a/modules/gke-nodepool/README.md b/modules/gke-nodepool/README.md index d464656f..4c471c60 100644 --- a/modules/gke-nodepool/README.md +++ b/modules/gke-nodepool/README.md @@ -21,7 +21,13 @@ module "cluster-1-nodepool-1" { ### Internally managed service account -To have the module auto-create a service account for 
the nodes, define the `service_account` variable without setting its `email` attribute. You can then specify service account scopes, or use the default. The service account resource and email (in both plain and IAM formats) are then available in outputs to assign IAM roles from your own code. +There are three different approaches to defining the nodes service account, all depending on the `service_account` variable where the `create` attribute controls creation of a new service account by this module, and the `email` attribute controls the actual service account to use. + +If you create a new service account, its resource and email (in both plain and IAM formats) are then available in outputs to reference it in other modules or resources. + +#### GCE default service account + +To use the GCE default service account, you can ignore the variable which is equivalent to `{ create = null, email = null }`. ```hcl module "cluster-1-nodepool-1" { @@ -30,7 +36,44 @@ module "cluster-1-nodepool-1" { cluster_name = "cluster-1" location = "europe-west1-b" name = "nodepool-1" - service_account = {} +} +# tftest modules=1 resources=1 +``` + +#### Externally defined service account + +To use an existing service account, pass in just the `email` attribute. + +```hcl +module "cluster-1-nodepool-1" { + source = "./fabric/modules/gke-nodepool" + project_id = "myproject" + cluster_name = "cluster-1" + location = "europe-west1-b" + name = "nodepool-1" + service_account = { + email = "foo-bar@myproject.iam.gserviceaccount.com" + } +} +# tftest modules=1 resources=1 +``` + +#### Auto-created service account + +To have the module create a service account, set the `create` attribute to `true` and optionally pass the desired account id in `email`. 
+ +```hcl +module "cluster-1-nodepool-1" { + source = "./fabric/modules/gke-nodepool" + project_id = "myproject" + cluster_name = "cluster-1" + location = "europe-west1-b" + name = "nodepool-1" + service_account = { + create = true + # optional + email = "spam-eggs" + } } # tftest modules=1 resources=2 ``` @@ -53,10 +96,10 @@ module "cluster-1-nodepool-1" { | [nodepool_config](variables.tf#L109) | Nodepool-level configuration. | object({…}) | | null | | [pod_range](variables.tf#L131) | Pod secondary range configuration. | object({…}) | | null | | [reservation_affinity](variables.tf#L148) | Configuration of the desired reservation which instances could take capacity from. | object({…}) | | null | -| [service_account](variables.tf#L158) | Nodepool service account. If this variable is set to null, the default GCE service account will be used. If set and email is null, a service account will be created. If scopes are null a default will be used. | object({…}) | | null | -| [sole_tenant_nodegroup](variables.tf#L167) | Sole tenant node group. | string | | null | -| [tags](variables.tf#L173) | Network tags applied to nodes. | list(string) | | null | -| [taints](variables.tf#L179) | Kubernetes taints applied to all nodes. | list(object({…})) | | null | +| [service_account](variables.tf#L158) | Nodepool service account. If this variable is set to null, the default GCE service account will be used. If set and email is null, a service account will be created. If scopes are null a default will be used. | object({…}) | | {} | +| [sole_tenant_nodegroup](variables.tf#L169) | Sole tenant node group. | string | | null | +| [tags](variables.tf#L175) | Network tags applied to nodes. | list(string) | | null | +| [taints](variables.tf#L181) | Kubernetes taints applied to all nodes. 
| list(object({…})) | | null | ## Outputs diff --git a/modules/gke-nodepool/main.tf b/modules/gke-nodepool/main.tf index 6a3714f0..0c35c8d0 100644 --- a/modules/gke-nodepool/main.tf +++ b/modules/gke-nodepool/main.tf @@ -31,17 +31,14 @@ locals { ) # if no attributes passed for service account, use the GCE default # if no email specified, create service account - service_account_create = ( - var.service_account != null && try(var.service_account.email, null) == null - ) service_account_email = ( - local.service_account_create + var.service_account.create ? google_service_account.service_account[0].email - : try(var.service_account.email, null) + : var.service_account.email ) service_account_scopes = ( - try(var.service_account.scopes, null) != null - ? var.service_account.scopes + var.service_account.oauth_scopes != null + ? var.service_account.oauth_scopes : [ "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", @@ -60,9 +57,13 @@ locals { } resource "google_service_account" "service_account" { - count = local.service_account_create ? 1 : 0 - project = var.project_id - account_id = "tf-gke-${var.name}" + count = var.service_account.create ? 1 : 0 + project = var.project_id + account_id = ( + var.service_account.email != null + ? split("@", var.service_account.email)[0] + : "tf-gke-${var.name}" + ) display_name = "Terraform GKE ${var.cluster_name} ${var.name}." } diff --git a/modules/gke-nodepool/variables.tf b/modules/gke-nodepool/variables.tf index dec5b823..15c8a151 100644 --- a/modules/gke-nodepool/variables.tf +++ b/modules/gke-nodepool/variables.tf @@ -158,10 +158,12 @@ variable "reservation_affinity" { variable "service_account" { description = "Nodepool service account. If this variable is set to null, the default GCE service account will be used. If set and email is null, a service account will be created. If scopes are null a default will be used." 
type = object({ - email = optional(string) - oauth_scopes = optional(list(string)) + create = optional(bool, false) + email = optional(string, null) + oauth_scopes = optional(list(string), null) }) - default = null + default = {} + nullable = false } variable "sole_tenant_nodegroup" { diff --git a/modules/net-vpc/README.md b/modules/net-vpc/README.md index 84377bd8..0d6a231e 100644 --- a/modules/net-vpc/README.md +++ b/modules/net-vpc/README.md @@ -276,8 +276,8 @@ flow_logs: # enable, set to empty map to use defaults | [subnet_iam](variables.tf#L133) | Subnet IAM bindings in {REGION/NAME => {ROLE => [MEMBERS]} format. | map(map(list(string))) | | {} | | [subnets](variables.tf#L139) | Subnet configuration. | list(object({…})) | | [] | | [subnets_proxy_only](variables.tf#L164) | List of proxy-only subnets for Regional HTTPS or Internal HTTPS load balancers. Note: Only one proxy-only subnet for each VPC network in each region can be active. | list(object({…})) | | [] | -| [subnets_psc](variables.tf#L176) | List of subnets for Private Service Connect service producers. | list(object({…})) | | [] | -| [vpc_create](variables.tf#L186) | Create VPC. When set to false, uses a data source to reference existing VPC. | bool | | true | +| [subnets_psc](variables.tf#L176) | List of subnets for Private Service Connect service producers. | list(object({…})) | | [] | +| [vpc_create](variables.tf#L187) | Create VPC. When set to false, uses a data source to reference existing VPC. 
| bool | | true | ## Outputs diff --git a/modules/net-vpc/subnets.tf b/modules/net-vpc/subnets.tf index 0496405b..ae094ecf 100644 --- a/modules/net-vpc/subnets.tf +++ b/modules/net-vpc/subnets.tf @@ -72,13 +72,17 @@ locals { } resource "google_compute_subnetwork" "subnetwork" { - for_each = local.subnets - project = var.project_id - network = local.network.name - name = each.value.name - region = each.value.region - ip_cidr_range = each.value.ip_cidr_range - description = try(each.value.description, "Terraform-managed.") + for_each = local.subnets + project = var.project_id + network = local.network.name + name = each.value.name + region = each.value.region + ip_cidr_range = each.value.ip_cidr_range + description = ( + each.value.description == null + ? "Terraform-managed." + : each.value.description + ) private_ip_google_access = each.value.enable_private_access secondary_ip_range = each.value.secondary_ip_ranges == null ? [] : [ for name, range in each.value.secondary_ip_ranges : @@ -107,9 +111,10 @@ resource "google_compute_subnetwork" "proxy_only" { name = each.value.name region = each.value.region ip_cidr_range = each.value.ip_cidr_range - description = try( - each.value.description, - "Terraform-managed proxy-only subnet for Regional HTTPS or Internal HTTPS LB." + description = ( + each.value.description == null + ? "Terraform-managed proxy-only subnet for Regional HTTPS or Internal HTTPS LB." + : each.value.description ) purpose = "REGIONAL_MANAGED_PROXY" role = ( @@ -124,9 +129,10 @@ resource "google_compute_subnetwork" "psc" { name = each.value.name region = each.value.region ip_cidr_range = each.value.ip_cidr_range - description = try( - each.value.description, - "Terraform-managed subnet for Private Service Connect (PSC NAT)." + description = ( + each.value.description == null + ? "Terraform-managed subnet for Private Service Connect (PSC NAT)." 
+ : each.value.description ) purpose = "PRIVATE_SERVICE_CONNECT" } diff --git a/modules/net-vpc/variables.tf b/modules/net-vpc/variables.tf index 89207479..a7aa2077 100644 --- a/modules/net-vpc/variables.tf +++ b/modules/net-vpc/variables.tf @@ -179,6 +179,7 @@ variable "subnets_psc" { name = string ip_cidr_range = string region = string + description = optional(string) })) default = [] } diff --git a/tests/blueprints/cloud_operations/glb_and_armor/__init__.py b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/__init__.py similarity index 100% rename from tests/blueprints/cloud_operations/glb_and_armor/__init__.py rename to tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/__init__.py diff --git a/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/main.tf b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/main.tf new file mode 100644 index 00000000..3552740c --- /dev/null +++ b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/main.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module "test" { + source = "../../../../../../blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider" + billing_account = var.billing_account + project_create = var.project_create + project_id = var.project_id + parent = var.parent + tfe_organization_id = var.tfe_organization_id + tfe_workspace_id = var.tfe_workspace_id + workload_identity_pool_id = var.workload_identity_pool_id + workload_identity_pool_provider_id = var.workload_identity_pool_provider_id + issuer_uri = var.issuer_uri +} diff --git a/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/variables.tf b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/variables.tf new file mode 100644 index 00000000..d99981c0 --- /dev/null +++ b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/variables.tf @@ -0,0 +1,68 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +variable "billing_account" { + type = string + default = "1234-ABCD-1234" +} + +variable "project_create" { + type = bool + default = true +} + +variable "project_id" { + type = string + default = "project-1" +} + +variable "parent" { + description = "Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format." 
+ type = string + default = null + validation { + condition = var.parent == null || can(regex("(organizations|folders)/[0-9]+", var.parent)) + error_message = "Parent must be of the form folders/folder_id or organizations/organization_id." + } +} + +variable "tfe_organization_id" { + description = "TFE organization id." + type = string + default = "org-123" +} + +variable "tfe_workspace_id" { + description = "TFE workspace id." + type = string + default = "ws-123" +} + +variable "workload_identity_pool_id" { + description = "Workload identity pool id." + type = string + default = "tfe-pool" +} + +variable "workload_identity_pool_provider_id" { + description = "Workload identity pool provider id." + type = string + default = "tfe-provider" +} + +variable "issuer_uri" { + description = "Terraform Enterprise uri. Replace the uri if a self hosted instance is used." + type = string + default = "https://app.terraform.io/" +} diff --git a/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/test_plan.py b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/test_plan.py new file mode 100644 index 00000000..228e51df --- /dev/null +++ b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/test_plan.py @@ -0,0 +1,19 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +def test_resources(e2e_plan_runner): + "Test that plan works and the number of resources is as expected." + modules, resources = e2e_plan_runner() + assert len(modules) == 2 + assert len(resources) == 10 diff --git a/tests/blueprints/networking/glb_and_armor/__init__.py b/tests/blueprints/networking/glb_and_armor/__init__.py new file mode 100644 index 00000000..6d6d1266 --- /dev/null +++ b/tests/blueprints/networking/glb_and_armor/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/blueprints/cloud_operations/glb_and_armor/fixture/main.tf b/tests/blueprints/networking/glb_and_armor/fixture/main.tf similarity index 89% rename from tests/blueprints/cloud_operations/glb_and_armor/fixture/main.tf rename to tests/blueprints/networking/glb_and_armor/fixture/main.tf index e02d1093..155677b2 100644 --- a/tests/blueprints/cloud_operations/glb_and_armor/fixture/main.tf +++ b/tests/blueprints/networking/glb_and_armor/fixture/main.tf @@ -13,7 +13,7 @@ # limitations under the License. 
module "test" { - source = "../../../../../blueprints/cloud-operations/glb_and_armor" + source = "../../../../../blueprints/networking/glb-and-armor" project_create = var.project_create project_id = var.project_id enforce_security_policy = var.enforce_security_policy diff --git a/tests/blueprints/cloud_operations/glb_and_armor/fixture/variables.tf b/tests/blueprints/networking/glb_and_armor/fixture/variables.tf similarity index 100% rename from tests/blueprints/cloud_operations/glb_and_armor/fixture/variables.tf rename to tests/blueprints/networking/glb_and_armor/fixture/variables.tf diff --git a/tests/blueprints/cloud_operations/glb_and_armor/test_plan.py b/tests/blueprints/networking/glb_and_armor/test_plan.py similarity index 100% rename from tests/blueprints/cloud_operations/glb_and_armor/test_plan.py rename to tests/blueprints/networking/glb_and_armor/test_plan.py diff --git a/tests/modules/apigee_organization/fixture/main.tf b/tests/modules/apigee_organization/fixture/main.tf index 9dfb49bc..37fa536b 100644 --- a/tests/modules/apigee_organization/fixture/main.tf +++ b/tests/modules/apigee_organization/fixture/main.tf @@ -21,10 +21,17 @@ module "test" { runtime_type = "CLOUD" billing_type = "EVALUATION" authorized_network = var.network - apigee_environments = [ - "eval1", - "eval2" - ] + apigee_environments = { + eval1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + eval2 = { + api_proxy_type = "CONFIGURABLE" + deployment_type = "ARCHIVE" + } + eval3 = {} + } apigee_envgroups = { eval = { environments = [ diff --git a/tests/modules/apigee_organization/test_plan.py b/tests/modules/apigee_organization/test_plan.py index ec2312c9..6e873bc0 100644 --- a/tests/modules/apigee_organization/test_plan.py +++ b/tests/modules/apigee_organization/test_plan.py @@ -23,7 +23,7 @@ def resources(plan_runner): def test_resource_count(resources): "Test number of resources created." 
- assert len(resources) == 6 + assert len(resources) == 7 def test_envgroup_attachment(resources): @@ -42,3 +42,19 @@ def test_envgroup(resources): assert envgroups[0]['name'] == 'eval' assert len(envgroups[0]['hostnames']) == 1 assert envgroups[0]['hostnames'][0] == 'eval.api.example.com' + + +def test_env(resources): + "Test environments." + envs = [r['values'] for r in resources if r['type'] + == 'google_apigee_environment'] + assert len(envs) == 3 + assert envs[0]['name'] == 'eval1' + assert envs[0]['api_proxy_type'] == 'PROGRAMMABLE' + assert envs[0]['deployment_type'] == 'PROXY' + assert envs[1]['name'] == 'eval2' + assert envs[1]['api_proxy_type'] == 'CONFIGURABLE' + assert envs[1]['deployment_type'] == 'ARCHIVE' + assert envs[2]['name'] == 'eval3' + assert envs[2]['api_proxy_type'] == 'API_PROXY_TYPE_UNSPECIFIED' + assert envs[2]['deployment_type'] == 'DEPLOYMENT_TYPE_UNSPECIFIED' diff --git a/tests/modules/dns/fixture/variables.tf b/tests/modules/dns/fixture/variables.tf index 522b238a..0fc6871a 100644 --- a/tests/modules/dns/fixture/variables.tf +++ b/tests/modules/dns/fixture/variables.tf @@ -32,15 +32,27 @@ variable "peer_network" { } variable "recordsets" { - type = map(object({ - ttl = number - records = list(string) - })) + type = any default = { "A localhost" = { ttl = 300, records = ["127.0.0.1"] } "A local-host.test.example." 
= { ttl = 300, records = ["127.0.0.2"] } "CNAME *" = { ttl = 300, records = ["localhost.example.org."] } "A " = { ttl = 300, records = ["127.0.0.3"] } + "A geo" = { + geo_routing = [ + { location = "europe-west1", records = ["127.0.0.4"] }, + { location = "europe-west2", records = ["127.0.0.5"] }, + { location = "europe-west3", records = ["127.0.0.6"] } + ] + } + "A wrr" = { + ttl = 600 + wrr_routing = [ + { weight = 0.6, records = ["127.0.0.7"] }, + { weight = 0.2, records = ["127.0.0.8"] }, + { weight = 0.2, records = ["10.10.0.9"] } + ] + } } } diff --git a/tests/modules/dns/test_plan.py b/tests/modules/dns/test_plan.py index 184ffe5d..a5f7407b 100644 --- a/tests/modules/dns/test_plan.py +++ b/tests/modules/dns/test_plan.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_private(plan_runner): "Test private zone with three recordsets." _, resources = plan_runner() - assert len(resources) == 5 - assert set(r['type'] for r in resources) == set([ - 'google_dns_record_set', 'google_dns_managed_zone' - ]) + assert len(resources) == 7 + assert set(r['type'] for r in resources) == set( + ['google_dns_record_set', 'google_dns_managed_zone']) for r in resources: if r['type'] != 'google_dns_managed_zone': continue @@ -29,15 +29,54 @@ def test_private(plan_runner): def test_private_recordsets(plan_runner): "Test recordsets in private zone." _, resources = plan_runner() - recordsets = [r['values'] - for r in resources if r['type'] == 'google_dns_record_set'] + recordsets = [ + r['values'] for r in resources if r['type'] == 'google_dns_record_set' + ] + assert set(r['name'] for r in recordsets) == set([ - 'localhost.test.example.', - 'local-host.test.example.', - '*.test.example.', - "test.example." + 'localhost.test.example.', 'local-host.test.example.', '*.test.example.', + "test.example.", "geo.test.example.", "wrr.test.example." 
]) + for r in recordsets: + if r['name'] not in ['wrr.test.example.', 'geo.test.example.']: + assert r['routing_policy'] == [] + assert r['rrdatas'] != [] + + geo_zone = [ + r['values'] for r in resources if r['address'] == + 'module.test.google_dns_record_set.cloud-geo-records["A geo"]' + ][0] + assert geo_zone['name'] == 'geo.test.example.' + assert geo_zone['routing_policy'][0]['wrr'] == [] + assert geo_zone['routing_policy'][0]['geo'] == [{ + 'location': 'europe-west1', + 'rrdatas': ['127.0.0.4'] + }, { + 'location': 'europe-west2', + 'rrdatas': ['127.0.0.5'] + }, { + 'location': 'europe-west3', + 'rrdatas': ['127.0.0.6'] + }] + + wrr_zone = [ + r['values'] for r in resources if r['address'] == + 'module.test.google_dns_record_set.cloud-wrr-records["A wrr"]' + ][0] + assert wrr_zone['name'] == 'wrr.test.example.' + assert wrr_zone['routing_policy'][0]['wrr'] == [{ + 'rrdatas': ['127.0.0.7'], + 'weight': 0.6 + }, { + 'rrdatas': ['127.0.0.8'], + 'weight': 0.2 + }, { + 'rrdatas': ['10.10.0.9'], + 'weight': 0.2 + }] + assert wrr_zone['routing_policy'][0]['geo'] == [] + def test_private_no_networks(plan_runner): "Test private zone not exposed to any network." @@ -60,26 +99,31 @@ def test_forwarding_recordsets_null_forwarders(plan_runner): def test_forwarding(plan_runner): "Test forwarding zone with single forwarder." - _, resources = plan_runner( - type='forwarding', recordsets='null', - forwarders='{ "1.2.3.4" = null }') + _, resources = plan_runner(type='forwarding', recordsets='null', + forwarders='{ "1.2.3.4" = null }') assert len(resources) == 1 resource = resources[0] assert resource['type'] == 'google_dns_managed_zone' - assert resource['values']['forwarding_config'] == [{'target_name_servers': [ - {'forwarding_path': '', 'ipv4_address': '1.2.3.4'}]}] + assert resource['values']['forwarding_config'] == [{ + 'target_name_servers': [{ + 'forwarding_path': '', + 'ipv4_address': '1.2.3.4' + }] + }] def test_peering(plan_runner): "Test peering zone." 
- _, resources = plan_runner(type='peering', - recordsets='null', + _, resources = plan_runner(type='peering', recordsets='null', peer_network='dummy-vpc-self-link') assert len(resources) == 1 resource = resources[0] assert resource['type'] == 'google_dns_managed_zone' - assert resource['values']['peering_config'] == [ - {'target_network': [{'network_url': 'dummy-vpc-self-link'}]}] + assert resource['values']['peering_config'] == [{ + 'target_network': [{ + 'network_url': 'dummy-vpc-self-link' + }] + }] def test_public(plan_runner): diff --git a/tests/modules/gke_cluster/fixture/variables.tf b/tests/modules/gke_cluster/fixture/variables.tf index 1b539d20..97fc6a63 100644 --- a/tests/modules/gke_cluster/fixture/variables.tf +++ b/tests/modules/gke_cluster/fixture/variables.tf @@ -28,3 +28,10 @@ variable "enable_features" { workload_identity = true } } + +variable "monitoring_config" { + type = any + default = { + managed_prometheus = true + } +} diff --git a/tests/modules/gke_nodepool/fixture/main.tf b/tests/modules/gke_nodepool/fixture/main.tf index aaa030b9..4ee27482 100644 --- a/tests/modules/gke_nodepool/fixture/main.tf +++ b/tests/modules/gke_nodepool/fixture/main.tf @@ -14,22 +14,31 @@ * limitations under the License. 
*/ +resource "google_service_account" "test" { + project = "my-project" + account_id = "gke-nodepool-test" + display_name = "Test Service Account" +} + module "test" { - source = "../../../../modules/gke-nodepool" - project_id = "my-project" - cluster_name = "cluster-1" - location = "europe-west1-b" - name = "nodepool-1" - gke_version = var.gke_version - labels = var.labels - max_pods_per_node = var.max_pods_per_node - node_config = var.node_config - node_count = var.node_count - node_locations = var.node_locations - nodepool_config = var.nodepool_config - pod_range = var.pod_range - reservation_affinity = var.reservation_affinity - service_account = var.service_account + source = "../../../../modules/gke-nodepool" + project_id = "my-project" + cluster_name = "cluster-1" + location = "europe-west1-b" + name = "nodepool-1" + gke_version = var.gke_version + labels = var.labels + max_pods_per_node = var.max_pods_per_node + node_config = var.node_config + node_count = var.node_count + node_locations = var.node_locations + nodepool_config = var.nodepool_config + pod_range = var.pod_range + reservation_affinity = var.reservation_affinity + service_account = { + create = var.service_account_create + email = google_service_account.test.email + } sole_tenant_nodegroup = var.sole_tenant_nodegroup tags = var.tags taints = var.taints diff --git a/tests/modules/gke_nodepool/fixture/variables.tf b/tests/modules/gke_nodepool/fixture/variables.tf index 420b9eb0..18376ec5 100644 --- a/tests/modules/gke_nodepool/fixture/variables.tf +++ b/tests/modules/gke_nodepool/fixture/variables.tf @@ -65,9 +65,9 @@ variable "reservation_affinity" { default = null } -variable "service_account" { - type = any - default = null +variable "service_account_create" { + type = bool + default = false } variable "sole_tenant_nodegroup" { diff --git a/tests/modules/gke_nodepool/test_plan.py b/tests/modules/gke_nodepool/test_plan.py index fd63f332..75d1cc14 100644 --- 
a/tests/modules/gke_nodepool/test_plan.py +++ b/tests/modules/gke_nodepool/test_plan.py @@ -21,9 +21,9 @@ def test_defaults(plan_runner): def test_service_account(plan_runner): - _, resources = plan_runner(service_account='{email="foo@example.org"}') + _, resources = plan_runner() assert len(resources) == 1 - _, resources = plan_runner(service_account='{}') + _, resources = plan_runner(service_account_create='true') assert len(resources) == 2 assert 'google_service_account' in [r['type'] for r in resources]