diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 21c0746a..1c63b78d 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -53,12 +53,12 @@ jobs: run: | terraform fmt -recursive -check -diff $GITHUB_WORKSPACE - - name: Check documentation (fabric) + - name: Check documentation id: documentation-fabric run: | - python3 tools/check_documentation.py examples modules fast + python3 tools/check_documentation.py modules fast blueprints - - name: Check documentation links (fabric) + - name: Check documentation links id: documentation-links-fabric run: | python3 tools/check_links.py . diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2a903525..3fc3fe56 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -33,7 +33,7 @@ env: TF_VERSION: 1.3.2 jobs: - doc-examples: + examples: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -68,7 +68,7 @@ jobs: pip install -r tests/requirements.txt pytest -vv tests/examples - examples: + blueprints: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 diff --git a/.gitignore b/.gitignore index ec7ead32..e0bfaac4 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,4 @@ examples/cloud-operations/adfs/ansible/vars/vars.yaml examples/cloud-operations/adfs/ansible/gssh.sh examples/cloud-operations/multi-cluster-mesh-gke-fleet-api/ansible/vars.yaml examples/cloud-operations/multi-cluster-mesh-gke-fleet-api/ansible/gssh.sh +blueprints/cloud-operations/network-dashboard/cloud-function.zip diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f751674..62a5afde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,21 @@ All notable changes to this project will be documented in this file. ## [Unreleased] +- [[#939](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/939)] Temporarily duplicate cloud armor example ([ludoo](https://github.com/ludoo)) ### BLUEPRINTS +- [[#941](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/941)] **incompatible change:** Refactor ILB module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) +- [[#936](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/936)] Enable org policy service and add README notice to modules ([ludoo](https://github.com/ludoo)) +- [[#931](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/931)] **incompatible change:** Refactor compute-mig module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) +- [[#932](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/932)] feat(project-factory): introduce additive iam bindings to project-fac… ([Malet](https://github.com/Malet)) +- [[#925](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/925)] Network dashboard: update main.tf and README following #922 ([brianhmj](https://github.com/brianhmj)) +- [[#924](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/924)] Fix formatting for gcloud dataflow job launch command ([aymanfarhat](https://github.com/aymanfarhat)) +- [[#921](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/921)] Align documentation, move glb blueprint ([ludoo](https://github.com/ludoo)) +- [[#915](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/915)] TFE OIDC with GCP WIF blueprint added ([averbuks](https://github.com/averbuks)) +- [[#899](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/899)] Static routes monitoring metrics added to network dashboard BP 
([maunope](https://github.com/maunope)) +- [[#909](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/909)] GCS2BQ: Move images and templates in sub-folders ([lcaggio](https://github.com/lcaggio)) +- [[#907](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/907)] Fix CloudSQL blueprint ([lcaggio](https://github.com/lcaggio)) - [[#897](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/897)] Project-factory: allow folder_id to be defined in defaults_file ([Malet](https://github.com/Malet)) - [[#900](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/900)] Improve net dashboard variables ([juliocc](https://github.com/juliocc)) - [[#896](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/896)] Network Dashboard: CFv2 and performance improvements ([aurelienlegrand](https://github.com/aurelienlegrand)) @@ -37,6 +49,8 @@ All notable changes to this project will be documented in this file. ### DOCUMENTATION +- [[#937](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/937)] Fix typos in blueprints README.md ([kumar-dhanagopal](https://github.com/kumar-dhanagopal)) +- [[#921](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/921)] Align documentation, move glb blueprint ([ludoo](https://github.com/ludoo)) - [[#898](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/898)] Update FAST bootstrap README.md ([juliocc](https://github.com/juliocc)) - [[#878](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/878)] chore: update cft and fabric ([bharathkkb](https://github.com/bharathkkb)) - [[#863](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/863)] Fabric vs CFT doc ([ludoo](https://github.com/ludoo)) @@ -44,6 +58,11 @@ All notable changes to this project will be documented in this file. ### FAST +- [[#941](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/941)] **incompatible change:** Refactor ILB module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) +- [[#935](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/935)] FAST: enable org policy API, fix run.allowedIngress value ([ludoo](https://github.com/ludoo)) +- [[#931](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/931)] **incompatible change:** Refactor compute-mig module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) +- [[#930](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/930)] **incompatible change:** Update organization/folder/project modules to use new org policies API and tf1.3 optionals ([juliocc](https://github.com/juliocc)) +- [[#911](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/911)] FAST: Additional PGA DNS records ([sruffilli](https://github.com/sruffilli)) - [[#903](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/903)] Initial replacement for CI/CD stage ([ludoo](https://github.com/ludoo)) - [[#898](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/898)] Update FAST bootstrap README.md ([juliocc](https://github.com/juliocc)) - [[#880](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/880)] **incompatible change:** Refactor net-vpc module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) @@ -63,6 +82,19 @@ All notable changes to this project will be documented in this file. 
### MODULES +- [[#941](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/941)] **incompatible change:** Refactor ILB module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) +- [[#940](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/940)] Ensure the implementation of org policies is consistent ([juliocc](https://github.com/juliocc)) +- [[#936](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/936)] Enable org policy service and add README notice to modules ([ludoo](https://github.com/ludoo)) +- [[#931](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/931)] **incompatible change:** Refactor compute-mig module for Terraform 1.3 ([ludoo](https://github.com/ludoo)) +- [[#930](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/930)] **incompatible change:** Update organization/folder/project modules to use new org policies API and tf1.3 optionals ([juliocc](https://github.com/juliocc)) +- [[#926](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/926)] Fix backwards compatibility for vpc subnet descriptions ([ludoo](https://github.com/ludoo)) +- [[#927](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/927)] Add support for deployment type and api proxy type for Apigee org ([kmucha555](https://github.com/kmucha555)) +- [[#923](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/923)] Fix service account creation error in gke nodepool module ([ludoo](https://github.com/ludoo)) +- [[#908](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/908)] GKE module: autopilot fixes ([ludoo](https://github.com/ludoo)) +- [[#906](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/906)] GKE module: add managed_prometheus to features ([apichick](https://github.com/apichick)) +- [[#916](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/916)] Add support for DNS routing policies ([juliocc](https://github.com/juliocc)) +- [[#918](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/918)] Fix race condition in SimpleNVA ([sruffilli](https://github.com/sruffilli)) +- [[#914](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/914)] **incompatible change:** Update DNS module ([juliocc](https://github.com/juliocc)) - [[#904](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/904)] Add missing description field ([dsbutler101](https://github.com/dsbutler101)) - [[#891](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/891)] Add internal_ips output to compute-vm module ([LucaPrete](https://github.com/LucaPrete)) - [[#890](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/890)] Add auto_delete and instance_redistribution_type to compute-vm and compute-mig modules. ([giovannibaratta](https://github.com/giovannibaratta)) @@ -95,6 +127,7 @@ All notable changes to this project will be documented in this file. 
### TOOLS +- [[#919](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/919)] Rename workflow names ([juliocc](https://github.com/juliocc)) - [[#902](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/902)] Bring back sorted variables check ([juliocc](https://github.com/juliocc)) - [[#887](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/887)] Disable parallel execution of tests and plugin cache ([ludoo](https://github.com/ludoo)) - [[#886](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/886)] Revert "Improve handling of tf plugin cache in tests" ([ludoo](https://github.com/ludoo)) diff --git a/README.md b/README.md index 70d5d666..ee10367a 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ This repository provides **end-to-end blueprints** and a **suite of Terraform mo - organization-wide [landing zone blueprint](fast/) used to bootstrap real-world cloud foundations - reference [blueprints](./blueprints/) used to deep dive on network patterns or product features -- a comprehensive source of lean [modules](./modules/dns) that lend themselves well to changes +- a comprehensive source of lean [modules](./modules/) that lend themselves well to changes The whole repository is meant to be cloned as a single unit, and then forked into separate owned repositories to seed production usage, or used as-is and periodically updated as a complete toolkit for prototyping. You can read more on this approach in our [contributing guide](./CONTRIBUTING.md), and a comparison against similar toolkits [here](./FABRIC-AND-CFT.md). @@ -29,16 +29,16 @@ The current list of modules supports most of the core foundational and networkin Currently available modules: -- **foundational** - [folder](./modules/folder), [organization](./modules/organization), [project](./modules/project), [service accounts](./modules/iam-service-account), [logging bucket](./modules/logging-bucket), [billing budget](./modules/billing-budget), [projects-data-source](./modules/projects-data-source), [organization-policy](./modules/organization-policy) -- **networking** - [VPC](./modules/net-vpc), [VPC firewall](./modules/net-vpc-firewall), [VPC peering](./modules/net-vpc-peering), [VPN static](./modules/net-vpn-static), [VPN dynamic](./modules/net-vpn-dynamic), [HA VPN](./modules/net-vpn-ha), [NAT](./modules/net-cloudnat), [address reservation](./modules/net-address), [DNS](./modules/dns), [L4 ILB](./modules/net-ilb), [L7 ILB](./modules/net-ilb-l7), [Service Directory](./modules/service-directory), [Cloud Endpoints](./modules/endpoints) -- **compute** - [VM/VM group](./modules/compute-vm), [MIG](./modules/compute-mig), [GKE cluster](./modules/gke-cluster), [GKE nodepool](./modules/gke-nodepool), [GKE hub](./modules/gke-hub), [COS container](./modules/cloud-config-container/cos-generic-metadata/) (coredns, mysql, onprem, squid) -- **data** - [GCS](./modules/gcs), [BigQuery dataset](./modules/bigquery-dataset), [Pub/Sub](./modules/pubsub), [Datafusion](./modules/datafusion), [Bigtable instance](./modules/bigtable-instance), [Cloud SQL instance](./modules/cloudsql-instance), [Data Catalog Policy Tag](./modules/data-catalog-policy-tag) -- **development** - [Cloud Source Repository](./modules/source-repository), [Container Registry](./modules/container-registry), [Artifact Registry](./modules/artifact-registry), [Apigee Organization](./modules/apigee-organization), [Apigee X Instance](./modules/apigee-x-instance), [API Gateway](./modules/api-gateway) -- **security** - 
[KMS](./modules/kms), [SecretManager](./modules/secret-manager), [VPC Service Control](./modules/vpc-sc) +- **foundational** - [billing budget](./modules/billing-budget), [Cloud Identity group](./modules/cloud-identity-group/), [folder](./modules/folder), [service accounts](./modules/iam-service-account), [logging bucket](./modules/logging-bucket), [organization](./modules/organization), [project](./modules/project), [projects-data-source](./modules/projects-data-source) +- **networking** - [DNS](./modules/dns), [Cloud Endpoints](./modules/endpoints), [address reservation](./modules/net-address), [NAT](./modules/net-cloudnat), [Global Load Balancer (classic)](./modules/net-glb/), [L4 ILB](./modules/net-ilb), [L7 ILB](./modules/net-ilb-l7), [VPC](./modules/net-vpc), [VPC firewall](./modules/net-vpc-firewall), [VPC peering](./modules/net-vpc-peering), [VPN dynamic](./modules/net-vpn-dynamic), [HA VPN](./modules/net-vpn-ha), [VPN static](./modules/net-vpn-static), [Service Directory](./modules/service-directory) +- **compute** - [VM/VM group](./modules/compute-vm), [MIG](./modules/compute-mig), [COS container](./modules/cloud-config-container/cos-generic-metadata/) (coredns, mysql, onprem, squid), [GKE cluster](./modules/gke-cluster), [GKE hub](./modules/gke-hub), [GKE nodepool](./modules/gke-nodepool) +- **data** - [BigQuery dataset](./modules/bigquery-dataset), [Bigtable instance](./modules/bigtable-instance), [Cloud SQL instance](./modules/cloudsql-instance), [Data Catalog Policy Tag](./modules/data-catalog-policy-tag), [Datafusion](./modules/datafusion), [GCS](./modules/gcs), [Pub/Sub](./modules/pubsub) +- **development** - [API Gateway](./modules/api-gateway), [Apigee Organization](./modules/apigee-organization), [Apigee X Instance](./modules/apigee-x-instance), [Artifact Registry](./modules/artifact-registry), [Container Registry](./modules/container-registry), [Cloud Source Repository](./modules/source-repository) +- **security** - [Binauthz](./modules/binauthz/), [KMS](./modules/kms), [SecretManager](./modules/secret-manager), [VPC Service Control](./modules/vpc-sc) - **serverless** - [Cloud Function](./modules/cloud-function), [Cloud Run](./modules/cloud-run) For more information and usage examples see each module's README file. ## End-to-end blueprints -The [blueprints](./blueprints/) in this repository are split in several main sections: **[networking blueprints](./blueprints/networking/)** that implement core patterns or features, **[data solutions blueprints](./blueprints/data-solutions/)** that demonstrate how to integrate data services in complete scenarios, **[cloud operations blueprints](./blueprints/cloud-operations/)** that leverage specific products to meet specific operational needs and **[factories](./blueprints/factories/)** that implement resource factories for the repetitive creation of specific resources, and finally **[GKE](./blueprints/gke)** and **[serverless](./blueprints/serverless)** design blueprints. 
+The [blueprints](./blueprints/) in this repository are split in several main sections: **[networking blueprints](./blueprints/networking/)** that implement core patterns or features, **[data solutions blueprints](./blueprints/data-solutions/)** that demonstrate how to integrate data services in complete scenarios, **[cloud operations blueprints](./blueprints/cloud-operations/)** that leverage specific products to meet specific operational needs and **[factories](./blueprints/factories/)** that implement resource factories for the repetitive creation of specific resources, and finally **[GKE](./blueprints/gke)**, **[serverless](./blueprints/serverless)**, and **[third-party solutions](./blueprints/third-party-solutions/)** design blueprints. diff --git a/blueprints/cloud-operations/glb_and_armor/shell_button.png b/assets/images/cloud-shell-button.png similarity index 100% rename from blueprints/cloud-operations/glb_and_armor/shell_button.png rename to assets/images/cloud-shell-button.png diff --git a/blueprints/README.md b/blueprints/README.md index aad7cb08..bfb4c483 100644 --- a/blueprints/README.md +++ b/blueprints/README.md @@ -1,15 +1,15 @@ # Terraform end-to-end blueprints for Google Cloud -This section **[networking blueprints](./networking/)** that implement core patterns or features, **[data solutions blueprints](./data-solutions/)** that demonstrate how to integrate data services in complete scenarios, **[cloud operations blueprints](./cloud-operations/)** that leverage specific products to meet specific operational needs, **[GKE](./gke/)** and **[Serverless](./serverless/)** blueprints, and **[factories](./factories/)** that implement resource factories for the repetitive creation of specific resources. +This section provides **[networking blueprints](./networking/)** that implement core patterns or features, **[data solutions blueprints](./data-solutions/)** that demonstrate how to integrate data services in complete scenarios, **[cloud operations blueprints](./cloud-operations/)** that leverage specific products to meet specific operational needs, **[GKE](./gke/)** and **[Serverless](./serverless/)** blueprints, and **[factories](./factories/)** that implement resource factories for the repetitive creation of specific resources. 
Currently available blueprints: -- **cloud operations** - [Resource tracking and remediation via Cloud Asset feeds](./cloud-operations/asset-inventory-feed-remediation), [Granular Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Granular Cloud DNS IAM for Shared VPC](./cloud-operations/dns-shared-vpc), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq), [Packer image builder](./cloud-operations/packer-image-builder), [On-prem SA key management](./cloud-operations/onprem-sa-key-management), [TCP healthcheck for unmanaged GCE instances](./cloud-operations/unmanaged-instances-healthcheck), [HTTP Load Balancer with Cloud Armor](./cloud-operations/glb_and_armor) -- **data solutions** - [GCE/GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms/), [Cloud Storage to Bigquery with Cloud Dataflow with least privileges](./data-solutions/gcs-to-bq-with-least-privileges/), [Data Platform Foundations](./data-solutions/data-platform-foundations/), [SQL Server AlwaysOn availability groups blueprint](./data-solutions/sqlserver-alwayson), [Cloud SQL instance with multi-region read replicas](./data-solutions/cloudsql-multiregion/), [Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key](./data-solutions/composer-2/) -- **factories** - [The why and the how of resource factories](./factories/README.md) -- **GKE** - [GKE multitenant fleet](./gke/multitenant-fleet/), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [Binary Authorization Pipeline](./gke/binauthz/), [Multi-cluster mesh on GKE (fleet API)](./gke/multi-cluster-mesh-gke-fleet-api/) -- **networking** - [hub and spoke via peering](./networking/hub-and-spoke-peering/), [hub and spoke via VPN](./networking/hub-and-spoke-vpn/), [DNS and Google Private Access for on-premises](./networking/onprem-google-access-dns/), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [ILB as next hop](./networking/ilb-next-hop), [Connecting to on-premise services leveraging PSC and hybrid NEGs](./networking/psc-hybrid/), [decentralized firewall](./networking/decentralized-firewall) -- **serverless** - [Multi-region deployments for API Gateway](./serverless/api-gateway/) -- **third party solutions** - [OpenShift cluster on Shared VPC](./third-party-solutions/openshift) +- **cloud operations** - [Active Directory Federation Services](./cloud-operations/adfs), [Cloud Asset Inventory feeds for resource change tracking and remediation](./cloud-operations/asset-inventory-feed-remediation), [Fine-grained Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Cloud DNS & Shared VPC design](./cloud-operations/dns-shared-vpc), [Delegated Role Grants](./cloud-operations/iam-delegated-role-grants), [Networking Dashboard](./cloud-operations/network-dashboard), [Managing on-prem service account keys by uploading public keys](./cloud-operations/onprem-sa-key-management), [Compute Image builder with Hashicorp Packer](./cloud-operations/packer-image-builder), [Packer example](./cloud-operations/packer-image-builder/packer), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq), [Configuring workload identity federation for Terraform Cloud/Enterprise workflow](./cloud-operations/terraform-enterprise-wif), 
[TCP healthcheck and restart for unmanaged GCE instances](./cloud-operations/unmanaged-instances-healthcheck), [Migrate for Compute Engine (v5) blueprints](./cloud-operations/vm-migration), [Configuring workload identity federation to access Google Cloud resources from apps running on Azure](./cloud-operations/workload-identity-federation) +- **data solutions** - [GCE and GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms), [Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key](./data-solutions/composer-2), [Cloud SQL instance with multi-region read replicas](./data-solutions/cloudsql-multiregion), [Data Platform](./data-solutions/data-platform-foundations), [Spinning up a foundation data pipeline on Google Cloud using Cloud Storage, Dataflow and BigQuery](./data-solutions/gcs-to-bq-with-least-privileges), [#SQL Server Always On Groups blueprint](./data-solutions/sqlserver-alwayson), [Data Playground](./data-solutions/data-playground) +- **factories** - [The why and the how of Resource Factories](./factories), [Google Cloud Identity Group Factory](./factories/cloud-identity-group-factory), [Google Cloud BQ Factory](./factories/bigquery-factory), [Google Cloud VPC Firewall Factory](./factories/net-vpc-firewall-yaml), [Minimal Project Factory](./factories/project-factory) +- **GKE** - [Binary Authorization Pipeline Blueprint](./gke/binauthz), [Storage API](./gke/binauthz/image), [Multi-cluster mesh on GKE (fleet API)](./gke/multi-cluster-mesh-gke-fleet-api), [GKE Multitenant Blueprint](./gke/multitenant-fleet), [Shared VPC with GKE support](./networking/shared-vpc-gke/) +- **networking** - [Decentralized firewall management](./networking/decentralized-firewall), [Decentralized firewall validator](./networking/decentralized-firewall/validator), [Network filtering with Squid](./networking/filtering-proxy), [HTTP Load Balancer with Cloud Armor](./networking/glb-and-armor), [Hub and Spoke via VPN](./networking/hub-and-spoke-vpn), [Hub and Spoke via VPC Peering](./networking/hub-and-spoke-peering), [Internal Load Balancer as Next Hop](./networking/ilb-next-hop), [Nginx-based reverse proxy cluster](./networking/nginx-reverse-proxy-cluster), [On-prem DNS and Google Private Access](./networking/onprem-google-access-dns), [Calling a private Cloud Function from On-premises](./networking/private-cloud-function-from-onprem), [Hybrid connectivity to on-premise services through PSC](./networking/psc-hybrid), [PSC Producer](./networking/psc-hybrid/psc-producer), [PSC Consumer](./networking/psc-hybrid/psc-consumer), [Shared VPC with optional GKE cluster](./networking/shared-vpc-gke) +- **serverless** - [Creating multi-region deployments for API Gateway](./serverless/api-gateway) +- **third party solutions** - [OpenShift on GCP user-provisioned infrastructure](./third-party-solutions/openshift), [Wordpress deployment on Cloud Run](./third-party-solutions/wordpress/cloudrun) For more information see the individual README files in each section. diff --git a/blueprints/cloud-operations/README.md b/blueprints/cloud-operations/README.md index 36e4c41b..863aee58 100644 --- a/blueprints/cloud-operations/README.md +++ b/blueprints/cloud-operations/README.md @@ -2,6 +2,12 @@ The blueprints in this folder show how to wire together different Google Cloud services to simplify operations, and are meant for testing, or as minimal but sufficiently complete starting points for actual use. 
+## Active Directory Federation Services + + This [blueprint](./adfs/) sets up managed AD, creates a server where AD FS will be installed and which will also act as the admin workstation for AD, and exposes AD FS using a GLB. It can also optionally set up a GCP project and VPC if needed. + +
+ ## Resource tracking and remediation via Cloud Asset feeds This [blueprint](./asset-inventory-feed-remediation) shows how to leverage [Cloud Asset Inventory feeds](https://cloud.google.com/asset-inventory/docs/monitoring-asset-changes) to stream resource changes in real time, and how to programmatically use the feed change notifications for alerting or remediation, via a Cloud Function wired to the feed PubSub queue. @@ -10,12 +16,6 @@ The blueprint's feed tracks changes to Google Compute instances, and the Cloud F
-## Scheduled Cloud Asset Inventory Export to Bigquery - - This [blueprint](./scheduled-asset-inventory-export-bq) shows how to leverage the [Cloud Asset Inventory Exporting to Bigquery](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery) feature, to keep track of your organization's assets over time storing information in Bigquery. Data stored in Bigquery can then be used for different purposes like dashboarding or analysis. - -
- ## Granular Cloud DNS IAM via Service Directory This [blueprint](./dns-fine-grained-iam) shows how to leverage [Service Directory](https://cloud.google.com/blog/products/networking/introducing-service-directory) and Cloud DNS Service Directory private zones, to implement fine-grained IAM controls on DNS. The blueprint creates a Service Directory namespace, a Cloud DNS private zone that uses it as its authoritative source, service accounts with different levels of permissions, and VMs to test them. @@ -28,37 +28,62 @@ The blueprint's feed tracks changes to Google Compute instances, and the Cloud F
-## Compute Engine quota monitoring - - This [blueprint](./quota-monitoring) shows a practical way of collecting and monitoring [Compute Engine resource quotas](https://cloud.google.com/compute/quotas) via Cloud Monitoring metrics as an alternative to the recently released [built-in quota metrics](https://cloud.google.com/monitoring/alerts/using-quota-metrics). A simple alert on quota thresholds is also part of the blueprint. - -
- ## Delegated Role Grants This [blueprint](./iam-delegated-role-grants) shows how to use delegated role grants to restrict service usage.
+## Network Dashboard + + This [blueprint](./network-dashboard/) provides an end-to-end solution to gather some GCP Networking quotas and limits (that cannot be seen in the GCP console today) and display them in a dashboard. The goal is to allow for better visibility of these limits, facilitating capacity planning and avoiding hitting these limits. + +
+ +## On-prem Service Account key management + +This [blueprint](./onprem-sa-key-management) shows how to manage IAM Service Account Keys by manually generating a key pair and uploading the public part of the key to GCP. + +
+ ## Packer image builder This [blueprint](./packer-image-builder) shows how to deploy infrastructure for a Compute Engine image builder based on [Hashicorp's Packer tool](https://www.packer.io).
-## On-prem Service Account key management +## Compute Engine quota monitoring - -This [blueprint](./onprem-sa-key-management) shows how to manage IAM Service Account Keys by manually generating a key pair and uploading the public part of the key to GCP. + This [blueprint](./quota-monitoring) shows a practical way of collecting and monitoring [Compute Engine resource quotas](https://cloud.google.com/compute/quotas) via Cloud Monitoring metrics as an alternative to the recently released [built-in quota metrics](https://cloud.google.com/monitoring/alerts/using-quota-metrics). A simple alert on quota thresholds is also part of the blueprint.
-## Migrate for Compute Engine (v5) - This set of [blueprints](./vm-migration) shows how to deploy Migrate for Compute Engine (v5) on top of existing Cloud Foundations on different scenarios. An blueprint on how to deploy the M4CE connector on VMWare ESXi is also part of the blueprints. +## Scheduled Cloud Asset Inventory Export to Bigquery + + This [blueprint](./scheduled-asset-inventory-export-bq) shows how to leverage the [Cloud Asset Inventory Exporting to Bigquery](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery) feature, to keep track of your organization's assets over time storing information in Bigquery. Data stored in Bigquery can then be used for different purposes like dashboarding or analysis. + +
+ +## Workload identity federation for Terraform Enterprise workflow + + This [blueprint](./terraform-enterprise-wif) shows how to configure [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) between a [Terraform Cloud/Enterprise](https://developer.hashicorp.com/terraform/enterprise) instance and Google Cloud.
## TCP healthcheck for unmanaged GCE instances + This [blueprint](./unmanaged-instances-healthcheck) shows how to leverage [Serverless VPC Access](https://cloud.google.com/vpc/docs/configure-serverless-vpc-access) and Cloud Functions to organize a highly performant TCP healthcheck for unmanaged GCE instances.
+ +## Migrate for Compute Engine (v5) + + This set of [blueprints](./vm-migration) shows how to deploy Migrate for Compute Engine (v5) on top of existing Cloud Foundations in different scenarios. A blueprint on how to deploy the M4CE connector on VMWare ESXi is also part of the blueprints. + +
+ +## Configuring Workload Identity Federation from apps running on Azure + + This [blueprint](./workload-identity-federation) shows how to set up everything needed, both in Azure and Google Cloud, so that a workload running in Azure can access Google Cloud resources without a service account key. This is achieved by configuring workload identity federation to trust access tokens generated for a specific application in an Azure Active Directory (AAD) tenant. + +
diff --git a/blueprints/cloud-operations/adfs/README.md b/blueprints/cloud-operations/adfs/README.md index a690f1ea..0b954884 100644 --- a/blueprints/cloud-operations/adfs/README.md +++ b/blueprints/cloud-operations/adfs/README.md @@ -1,19 +1,19 @@ -# AD FS +# Active Directory Federation Services -This blueprint does the following: +This blueprint does the following: Terraform: - (Optional) Creates a project. - (Optional) Creates a VPC. - Sets up managed AD -- Creates a server where AD FS will be installed. This machine will also act as admin workstation for AD. +- Creates a server where AD FS will be installed. This machine will also act as admin workstation for AD. - Exposes AD FS using GLB. Ansible: - Installs the required Windows features and joins the computer to the AD domain. -- Provisions some tests users, groups and group memberships in AD. The data to provision is in the files directory of the ad-provisioning ansible role. There is script available in the scripts/ad-provisioning folder that you can use to generate an alternative users or memberships file. +- Provisions some tests users, groups and group memberships in AD. The data to provision is in the files directory of the ad-provisioning ansible role. There is script available in the scripts/ad-provisioning folder that you can use to generate an alternative users or memberships file. - Installs AD FS In addition to this, we also include a Powershell script that facilitates the configuration required for Anthos when authenticating users with AD FS as IdP. @@ -26,8 +26,8 @@ The diagram below depicts the architecture of the blueprint: Clone this repository or [open it in cloud shell](https://ssh.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fcloud-foundation-fabric&cloudshell_print=cloud-shell-readme.txt&cloudshell_working_dir=blueprints%2Fcloud-operations%2Fadfs), then go through the following steps to create resources: -* `terraform init` -* `terraform apply -var project_id=my-project-id -var ad_dns_domain_name=my-domain.org -var adfs_dns_domain_name=adfs.my-domain.org` +- `terraform init` +- `terraform apply -var project_id=my-project-id -var ad_dns_domain_name=my-domain.org -var adfs_dns_domain_name=adfs.my-domain.org` Once the resources have been created, do the following: diff --git a/blueprints/cloud-operations/adfs/versions.tf b/blueprints/cloud-operations/adfs/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/adfs/versions.tf +++ b/blueprints/cloud-operations/adfs/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/asset-inventory-feed-remediation/versions.tf b/blueprints/cloud-operations/asset-inventory-feed-remediation/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/asset-inventory-feed-remediation/versions.tf +++ b/blueprints/cloud-operations/asset-inventory-feed-remediation/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git 
a/blueprints/cloud-operations/dns-fine-grained-iam/versions.tf b/blueprints/cloud-operations/dns-fine-grained-iam/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/dns-fine-grained-iam/versions.tf +++ b/blueprints/cloud-operations/dns-fine-grained-iam/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/dns-shared-vpc/versions.tf b/blueprints/cloud-operations/dns-shared-vpc/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/dns-shared-vpc/versions.tf +++ b/blueprints/cloud-operations/dns-shared-vpc/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/glb_and_armor/README.md b/blueprints/cloud-operations/glb_and_armor/README.md index 25ffec90..0c9a802e 100644 --- a/blueprints/cloud-operations/glb_and_armor/README.md +++ b/blueprints/cloud-operations/glb_and_armor/README.md @@ -2,7 +2,7 @@ ## Introduction -This repository contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. +This blueprint contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. This tutorial is general enough to fit in a variety of use-cases, from hosting a mobile app's backend to deploy proprietary workloads at scale. @@ -62,7 +62,7 @@ Note: To grant a user a role, take a look at the [Granting and Revoking Access]( Click on the button below, sign in if required and when the prompt appears, click on “confirm”. 
-[![Open Cloudshell](shell_button.png)](https://goo.gle/GoCloudArmor) +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoCloudArmor) This will clone the repository to your cloud shell and a screen like this one will appear: diff --git a/blueprints/cloud-operations/glb_and_armor/main.tf b/blueprints/cloud-operations/glb_and_armor/main.tf index 6e43bc15..83622609 100644 --- a/blueprints/cloud-operations/glb_and_armor/main.tf +++ b/blueprints/cloud-operations/glb_and_armor/main.tf @@ -153,22 +153,20 @@ module "vm_siege" { } module "mig_ew1" { - source = "../../../modules/compute-mig" - project_id = module.project.project_id - location = "europe-west1" - name = "${local.prefix}europe-west1-mig" - regional = true - default_version = { - instance_template = module.instance_template_ew1.template.self_link - name = "default" - } + source = "../../../modules/compute-mig" + project_id = module.project.project_id + location = "europe-west1" + name = "${local.prefix}europe-west1-mig" + instance_template = module.instance_template_ew1.template.self_link autoscaler_config = { - max_replicas = 5 - min_replicas = 1 - cooldown_period = 45 - cpu_utilization_target = 0.8 - load_balancing_utilization_target = null - metric = null + max_replicas = 5 + min_replicas = 1 + cooldown_period = 45 + scaling_signals = { + cpu_utilization = { + target = 0.65 + } + } } named_ports = { http = 80 @@ -179,22 +177,20 @@ module "mig_ew1" { } module "mig_ue1" { - source = "../../../modules/compute-mig" - project_id = module.project.project_id - location = "us-east1" - name = "${local.prefix}us-east1-mig" - regional = true - default_version = { - instance_template = module.instance_template_ue1.template.self_link - name = "default" - } + source = "../../../modules/compute-mig" + project_id = module.project.project_id + location = "us-east1" + name = "${local.prefix}us-east1-mig" + instance_template = module.instance_template_ue1.template.self_link autoscaler_config = { - max_replicas = 5 - min_replicas = 1 - cooldown_period = 45 - cpu_utilization_target = 0.8 - load_balancing_utilization_target = null - metric = null + max_replicas = 5 + min_replicas = 1 + cooldown_period = 45 + scaling_signals = { + cpu_utilization = { + target = 0.65 + } + } } named_ports = { http = 80 diff --git a/blueprints/cloud-operations/iam-delegated-role-grants/versions.tf b/blueprints/cloud-operations/iam-delegated-role-grants/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/iam-delegated-role-grants/versions.tf +++ b/blueprints/cloud-operations/iam-delegated-role-grants/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/network-dashboard/README.md b/blueprints/cloud-operations/network-dashboard/README.md index 4253f806..c835dc01 100644 --- a/blueprints/cloud-operations/network-dashboard/README.md +++ b/blueprints/cloud-operations/network-dashboard/README.md @@ -15,15 +15,19 @@ Three metric descriptors are created for each monitored resource: usage, limit a Clone this repository, then go through the following steps to create resources: - Create a terraform.tfvars file with the following content: - - organization_id = "" - - billing_account = "" - - monitoring_project_id = "project-0" # Monitoring project 
where the dashboard will be created and the solution deployed - - monitored_projects_list = ["project-1", "project2"] # Projects to be monitored by the solution - - monitored_folders_list = ["folder_id"] # Folders to be monitored by the solution - - v2 = true|false # Set to true to use V2 Cloud Functions environment + ```tfvars + organization_id = "" + billing_account = "" + monitoring_project_id = "project-0" # Monitoring project where the dashboard will be created and the solution deployed + monitored_projects_list = ["project-1", "project2"] # Projects to be monitored by the solution + monitored_folders_list = ["folder_id"] # Folders to be monitored by the solution + v2 = false # Set to true to use V2 Cloud Functions environment + ``` - `terraform init` - `terraform apply` +Note: Org level viewing permission is required for some metrics such as firewall policies. + Once the resources are deployed, go to the following page to see the dashboard: https://console.cloud.google.com/monitoring/dashboards?project=. A dashboard called "quotas-utilization" should be created. @@ -46,22 +50,33 @@ The Cloud Function currently tracks usage, limit and utilization of: - internal forwarding rules for internal L7 load balancers per VPC - internal forwarding rules for internal L4 load balancers per VPC peering group - internal forwarding rules for internal L7 load balancers per VPC peering group -- Dynamic routes per VPC -- Dynamic routes per VPC peering group +- Dynamic routes per VPC +- Dynamic routes per VPC peering group +- Static routes per project (VPC drill down is available for usage) +- Static routes per VPC peering group - IP utilization per subnet (% of IP addresses used in a subnet) - VPC firewall rules per project (VPC drill down is available for usage) - Tuples per Firewall Policy It writes this values to custom metrics in Cloud Monitoring and creates a dashboard to visualize the current utilization of these metrics in Cloud Monitoring. -Note that metrics are created in the cloud-function/metrics.yaml file. +Note that metrics are created in the cloud-function/metrics.yaml file. You can also edit default limits for a specific network in that file. See the example for `vpc_peering_per_network`. + +## Assumptions and limitations +- The CF assumes that all VPCs in peering groups are within the same organization, except for PSA peerings +- The CF will only fetch subnet utilization data from the PSA peerings (not the VMs, ILB or routes usage) +- The CF assumes global routing is ON, this impacts dynamic routes usage calculation +- The CF assumes custom routes importing/exporting is ON, this impacts static and dynamic routes usage calculation +- The CF assumes all networks in peering groups have the same global routing and custom routes sharing configuration -You can also edit default limits for a specific network in that file. See the example for `vpc_peering_per_network`. ## Next steps and ideas In a future release, we could support: -- Static routes per VPC / per VPC peering group - Google managed VPCs that are peered with PSA (such as Cloud SQL or Memorystore) +- Dynamic routes calculation for VPCs/PPGs with "global routing" set to OFF +- Static routes calculation for projects/PPGs with "custom routes importing/exporting" set to OFF +- Calculations for cross Organization peering groups +- Support different scopes (reduced and fine-grained) If you are interested in this and/or would like to contribute, please contact legranda@google.com. 
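As a rough illustration of how the static route usage, limit and utilization values described in this README fit together (usage is aggregated per network, while the limit and utilization are computed per project), here is a minimal standalone Python sketch. It is not the blueprint's Cloud Function code: the function name, input dictionaries and sample values are hypothetical.

```python
from collections import defaultdict


def static_route_utilization(static_routes_per_network, project_route_limits):
  """Aggregates per-network static route counts into per-project usage and
  computes utilization against each project's 'routes' quota limit.

  static_routes_per_network: dict of network self-link -> static route count
  project_route_limits: dict of project id -> 'routes' quota limit
  """
  usage = defaultdict(int)
  for network_link, count in static_routes_per_network.items():
    # Network self-links look like:
    # https://www.googleapis.com/compute/v1/projects/<project>/global/networks/<name>
    project_id = network_link.split("/")[6]
    usage[project_id] += count

  results = {}
  for project_id, used in usage.items():
    limit = project_route_limits.get(project_id)
    if not limit:
      # Mirrors the behavior described above: skip projects whose quota
      # limit could not be determined.
      continue
    results[project_id] = {
        "usage": used,
        "limit": limit,
        "utilization": used / limit,
    }
  return results


if __name__ == "__main__":
  # Hypothetical values: one VPC with 12 custom static routes and a
  # per-project routes limit of 250.
  print(
      static_route_utilization(
          {
              "https://www.googleapis.com/compute/v1/projects/prj-a/global/networks/vpc-1": 12
          },
          {"prj-a": 250},
      ))
```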
diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/main.py b/blueprints/cloud-operations/network-dashboard/cloud-function/main.py index fafb26b0..83e93fbb 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/main.py +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/main.py @@ -163,6 +163,9 @@ def main(event, context=None): l4_forwarding_rules_dict = ilb_fwrules.get_forwarding_rules_dict(config, "L4") l7_forwarding_rules_dict = ilb_fwrules.get_forwarding_rules_dict(config, "L7") subnet_range_dict = networks.get_subnet_ranges_dict(config) + static_routes_dict = routes.get_static_routes_dict(config) + dynamic_routes_dict = routes.get_dynamic_routes( + config, metrics_dict, limits_dict['dynamic_routes_per_network_limit']) try: @@ -181,10 +184,12 @@ def main(event, context=None): ilb_fwrules.get_forwarding_rules_data( config, metrics_dict, l7_forwarding_rules_dict, limits_dict['internal_forwarding_rules_l7_limit'], "L7") + + routes.get_static_routes_data(config, metrics_dict, static_routes_dict, + project_quotas_dict) + peerings.get_vpc_peering_data(config, metrics_dict, limits_dict['number_of_vpc_peerings_limit']) - dynamic_routes_dict = routes.get_dynamic_routes( - config, metrics_dict, limits_dict['dynamic_routes_per_network_limit']) # Per VPC peering group metrics metrics.get_pgg_data( @@ -207,7 +212,13 @@ def main(event, context=None): ["subnet_ranges_per_peering_group"], subnet_range_dict, config["limit_names"]["SUBNET_RANGES"], limits_dict['number_of_subnet_IP_ranges_ppg_limit']) - routes.get_dynamic_routes_ppg( + #static + routes.get_routes_ppg( + config, metrics_dict["metrics_per_peering_group"] + ["static_routes_per_peering_group"], static_routes_dict, + limits_dict['static_routes_per_peering_group_limit']) + #dynamic + routes.get_routes_ppg( config, metrics_dict["metrics_per_peering_group"] ["dynamic_routes_per_peering_group"], dynamic_routes_dict, limits_dict['dynamic_routes_per_peering_group_limit']) diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics.yaml b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics.yaml index b87dc1c3..0c5bb8cc 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics.yaml +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics.yaml @@ -99,6 +99,19 @@ metrics_per_network: utilization: name: dynamic_routes_per_network_utilization description: Number of Dynamic routes per network - utilization. + #static routes limit is per project, but usage is per network + static_routes_per_project: + usage: + name: static_routes_per_project_vpc_usage + description: Number of Static routes per project and network - usage. + limit: + name: static_routes_per_project_limit + description: Number of Static routes per project - limit. + values: + default_value: 250 + utilization: + name: static_routes_per_project_utilization + description: Number of Static routes per project - utilization. metrics_per_peering_group: l4_forwarding_rules_per_peering_group: usage: @@ -160,6 +173,18 @@ metrics_per_peering_group: utilization: name: dynamic_routes_per_peering_group_utilization description: Number of Dynamic routes per peering group - utilization. + static_routes_per_peering_group: + usage: + name: static_routes_per_peering_group_usage + description: Number of Static routes per peering group - usage. + limit: + name: static_routes_per_peering_group_limit + description: Number of Static routes per peering group - limit. 
+ values: + default_value: 300 + utilization: + name: static_routes_per_peering_group_utilization + description: Number of Static routes per peering group - utilization. metrics_per_project: firewalls: usage: diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/firewall_policies.py b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/firewall_policies.py index ef4c2518..95a26db3 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/firewall_policies.py +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/firewall_policies.py @@ -26,8 +26,8 @@ from . import metrics, networks, limits def get_firewall_policies_dict(config: dict): ''' - Calls the Asset Inventory API to get all Firewall Policies under the GCP organization - + Calls the Asset Inventory API to get all Firewall Policies under the GCP organization, including children + Ignores monitored projects list: returns all policies regardless of their parent resource Parameters: config (dict): The dict containing config like clients and limits Returns: @@ -55,8 +55,8 @@ def get_firewall_policies_dict(config: dict): def get_firewal_policies_data(config, metrics_dict, firewall_policies_dict): ''' - Gets the data for VPC Firewall lorem ipsum - + Gets the data for VPC Firewall Policies in an organization, including children. All folders are considered, + only projects in the monitored projects list are considered. Parameters: config (dict): The dict containing config like clients and limits metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions. @@ -91,6 +91,9 @@ def get_firewal_policies_data(config, metrics_dict, firewall_policies_dict): parent_type = re.search("(^\w+)", firewall_policy["parent"]).group( 1) if "parent" in firewall_policy else "projects" + if parent_type == "projects" and parent not in config["monitored_projects"]: + continue + metric_labels = {'parent': parent, 'parent_type': parent_type} metric_labels["name"] = firewall_policy[ diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/limits.py b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/limits.py index 5c5bfc73..8987b4cb 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/limits.py +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/limits.py @@ -42,7 +42,7 @@ def get_quotas_dict(quotas_list): def get_quota_project_limit(config, regions=["global"]): ''' - Retrieves limit for a specific project quota + Retrieves quotas for all monitored project in selected regions, default 'global' Parameters: project_link (string): Project link. Returns: @@ -158,7 +158,7 @@ def get_quota_current_limit(config, project_link, metric_name): def count_effective_limit(config, project_id, network_dict, usage_metric_name, limit_metric_name, utilization_metric_name, - limit_dict): + limit_dict, timestamp=None): ''' Calculates the effective limits (using algorithm in the link below) for peering groups and writes data (usage, limit, utilization) to the custom metrics. Source: https://cloud.google.com/vpc/docs/quota#vpc-peering-effective-limit @@ -171,11 +171,13 @@ def count_effective_limit(config, project_id, network_dict, usage_metric_name, limit_metric_name (string): Name of the custom metric to be populated for limit per VPC peering group. utilization_metric_name (string): Name of the custom metric to be populated for utilization per VPC peering group. 
limit_dict (dictionary of string:int): Dictionary containing the limit per peering group (either VPC specific or default limit). + timestamp (time): timestamp to be recorded for all points Returns: None ''' - timestamp = time.time() + if timestamp == None: + timestamp = time.time() if network_dict['peerings'] == []: return diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/metrics.py b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/metrics.py index 2ae54c69..b26e30d4 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/metrics.py +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/metrics.py @@ -91,7 +91,8 @@ def create_metric(metric_name, description, monitoring_project, config): def append_data_to_series_buffer(config, metric_name, metric_value, metric_labels, timestamp=None): ''' - Writes data to Cloud Monitoring custom metrics. + Appends data to Cloud Monitoring custom metrics, using a buffer. buffer is flushed every BUFFER_LEN elements, + any unflushed series is discarded upon function closure Parameters: config (dict): The dict containing config like clients and limits metric_name (string): Name of the metric @@ -139,7 +140,7 @@ def append_data_to_series_buffer(config, metric_name, metric_value, def flush_series_buffer(config): ''' - writes buffered metrics to Google Cloud Monitoring, empties buffer upon failure + writes buffered metrics to Google Cloud Monitoring, empties buffer upon both failure/success config (dict): The dict containing config like clients and limits ''' try: @@ -188,6 +189,7 @@ def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict): current_quota_limit_view = customize_quota_view(current_quota_limit) + timestamp = time.time() # For each network in this GCP project for network_dict in network_dict_list: if network_dict['network_id'] == 0: @@ -238,7 +240,7 @@ def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict): metric_dict["usage"]["name"], metric_dict["limit"]["name"], metric_dict["utilization"]["name"], - limit_dict) + limit_dict, timestamp) print( f"Buffered {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project_id}" ) diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/routes.py b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/routes.py index 8bca3496..a1614545 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/routes.py +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/routes.py @@ -17,6 +17,7 @@ import time from collections import defaultdict +from google.protobuf import field_mask_pb2 from . import metrics, networks, limits, peerings, routers @@ -78,8 +79,8 @@ def get_routes_for_network(config, network_link, project_id, routers_dict): def get_dynamic_routes(config, metrics_dict, limits_dict): ''' - Writes all dynamic routes per VPC to custom metrics. - + This function gets the usage, limit and utilization for the dynamic routes per VPC + note: assumes global routing is ON for all VPCs Parameters: config (dict): The dict containing config like clients and limits metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions. 
@@ -128,10 +129,10 @@ def get_dynamic_routes(config, metrics_dict, limits_dict): return dynamic_routes_dict -def get_dynamic_routes_ppg(config, metric_dict, usage_dict, limit_dict): +def get_routes_ppg(config, metric_dict, usage_dict, limit_dict): ''' - This function gets the usage, limit and utilization for the dynamic routes per VPC peering group. - + This function gets the usage, limit and utilization for the static or dynamic routes per VPC peering group. + note: assumes global routing is ON for all VPCs for dynamic routes, assumes share custom routes is on for all peered networks Parameters: config (dict): The dict containing config like clients and limits metric_dict (dictionary of string: string): Dictionary with the metric names and description, that will be used to populate the metrics @@ -140,11 +141,12 @@ def get_dynamic_routes_ppg(config, metric_dict, usage_dict, limit_dict): Returns: None ''' - for project in config["monitored_projects"]: - network_dict_list = peerings.gather_peering_data(config, project) + timestamp = time.time() + for project_id in config["monitored_projects"]: + network_dict_list = peerings.gather_peering_data(config, project_id) for network_dict in network_dict_list: - network_link = f"https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network_dict['network_name']}" + network_link = f"https://www.googleapis.com/compute/v1/projects/{project_id}/global/networks/{network_dict['network_name']}" limit = limits.get_ppg(network_link, limit_dict) @@ -169,11 +171,119 @@ def get_dynamic_routes_ppg(config, metric_dict, usage_dict, limit_dict): peered_network_dict["usage"] = peered_usage peered_network_dict["limit"] = peered_limit - limits.count_effective_limit(config, project, network_dict, + limits.count_effective_limit(config, project_id, network_dict, metric_dict["usage"]["name"], metric_dict["limit"]["name"], metric_dict["utilization"]["name"], - limit_dict) + limit_dict, timestamp) print( - f"Wrote {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project}" + f"Buffered {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project_id}" ) + + +def get_static_routes_dict(config): + ''' + Calls the Asset Inventory API to get all static custom routes under the GCP organization. + Parameters: + config (dict): The dict containing config like clients and limits + Returns: + routes_per_vpc_dict (dictionary of string: int): Keys are the network links and values are the number of custom static routes per network. 
+ ''' + routes_per_vpc_dict = defaultdict() + usage_dict = defaultdict() + + read_mask = field_mask_pb2.FieldMask() + read_mask.FromJsonString('name,versionedResources') + + response = config["clients"]["asset_client"].search_all_resources( + request={ + "scope": f"organizations/{config['organization']}", + "asset_types": ["compute.googleapis.com/Route"], + "read_mask": read_mask + }) + + for resource in response: + for versioned in resource.versioned_resources: + static_route = dict() + for field_name, field_value in versioned.resource.items(): + static_route[field_name] = field_value + static_route["project_id"] = static_route["network"].split('/')[6] + static_route["network_name"] = static_route["network"].split('/')[-1] + network_link = f"https://www.googleapis.com/compute/v1/projects/{static_route['project_id']}/global/networks/{static_route['network_name']}" + #exclude default vpc and peering routes, dynamic routes are not in Cloud Asset Inventory + if "nextHopPeering" not in static_route and "nextHopNetwork" not in static_route: + if network_link not in routes_per_vpc_dict: + routes_per_vpc_dict[network_link] = dict() + routes_per_vpc_dict[network_link]["project_id"] = static_route[ + "project_id"] + routes_per_vpc_dict[network_link]["network_name"] = static_route[ + "network_name"] + if static_route["destRange"] not in routes_per_vpc_dict[network_link]: + routes_per_vpc_dict[network_link][static_route["destRange"]] = {} + if "usage" not in routes_per_vpc_dict[network_link]: + routes_per_vpc_dict[network_link]["usage"] = 0 + routes_per_vpc_dict[network_link][ + "usage"] = routes_per_vpc_dict[network_link]["usage"] + 1 + + #output a dict with network links and usage only + return { + network_link_out: routes_per_vpc_dict[network_link_out]["usage"] + for network_link_out in routes_per_vpc_dict + } + + +def get_static_routes_data(config, metrics_dict, static_routes_dict, + project_quotas_dict): + ''' + Determines and writes the number of static routes for each VPC in monitored projects, the per project limit and the per project utilization + note: assumes custom routes sharing is ON for all VPCs + Parameters: + config (dict): The dict containing config like clients and limits + metric_dict (dictionary of string: string): Dictionary with the metric names and description, that will be used to populate the metrics + static_routes_dict (dictionary of dictionary: int): Keys are the network links and values are the number of custom static routes per network. + project_quotas_dict (dictionary of string:int): Dictionary with the network link as key and the limit as value. 
+ Returns: + None + ''' + timestamp = time.time() + project_usage = {project: 0 for project in config["monitored_projects"]} + + #usage is drilled down by network + for network_link in static_routes_dict: + + project_id = network_link.split('/')[6] + if (project_id not in config["monitored_projects"]): + continue + network_name = network_link.split('/')[-1] + + project_usage[project_id] = project_usage[project_id] + static_routes_dict[ + network_link] + + metric_labels = {"project": project_id, "network_name": network_name} + metrics.append_data_to_series_buffer( + config, metrics_dict["metrics_per_network"]["static_routes_per_project"] + ["usage"]["name"], static_routes_dict[network_link], metric_labels, + timestamp=timestamp) + + #limit and utilization are calculated by project + for project_id in project_usage: + current_quota_limit = project_quotas_dict[project_id]['global']["routes"][ + "limit"] + if current_quota_limit is None: + print( + f"Could not determine static routes metric for projects/{project_id} due to missing quotas" + ) + continue + # limit and utilization are calculted by project + metric_labels = {"project": project_id} + metrics.append_data_to_series_buffer( + config, metrics_dict["metrics_per_network"]["static_routes_per_project"] + ["limit"]["name"], current_quota_limit, metric_labels, + timestamp=timestamp) + metrics.append_data_to_series_buffer( + config, metrics_dict["metrics_per_network"]["static_routes_per_project"] + ["utilization"]["name"], + project_usage[project_id] / current_quota_limit, metric_labels, + timestamp=timestamp) + + return diff --git a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/vpc_firewalls.py b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/vpc_firewalls.py index 3b10b0e3..f9fec79a 100644 --- a/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/vpc_firewalls.py +++ b/blueprints/cloud-operations/network-dashboard/cloud-function/metrics/vpc_firewalls.py @@ -84,7 +84,7 @@ def get_firewalls_data(config, metrics_dict, project_quotas_dict, current_quota_limit = project_quotas_dict[project_id]['global']["firewalls"] if current_quota_limit is None: print( - f"Could not determine VPC firewal rules to metric for projects/{project_id} due to missing quotas" + f"Could not determine VPC firewal rules metric for projects/{project_id} due to missing quotas" ) continue diff --git a/blueprints/cloud-operations/network-dashboard/dashboards/quotas-utilization.json b/blueprints/cloud-operations/network-dashboard/dashboards/quotas-utilization.json index 0e71bb7f..c9eb8bd1 100644 --- a/blueprints/cloud-operations/network-dashboard/dashboards/quotas-utilization.json +++ b/blueprints/cloud-operations/network-dashboard/dashboards/quotas-utilization.json @@ -1,6 +1,6 @@ { "category": "CUSTOM", - "displayName": "quotas_utilization_updated", + "displayName": "quotas_utilization", "mosaicLayout": { "columns": 12, "tiles": [ @@ -22,13 +22,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/internal_forwarding_rules_l4_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "1800s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -64,13 +62,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": 
"metric.type=\"custom.googleapis.com/internal_forwarding_rules_l7_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -106,13 +102,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/number_of_instances_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -148,13 +142,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/number_of_vpc_peerings_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -190,13 +182,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/number_of_active_vpc_peerings_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_INTERPOLATE" } } @@ -232,13 +222,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/number_of_subnet_IP_ranges_ppg_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -274,13 +262,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/internal_forwarding_rules_l4_ppg_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -316,13 +302,11 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/internal_forwarding_rules_l7_ppg_utilization\" resource.type=\"global\"", "secondaryAggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" } } @@ -358,7 +342,6 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "3600s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_NEXT_OLDER" }, "filter": "metric.type=\"custom.googleapis.com/number_of_instances_ppg_utilization\" resource.type=\"global\"" @@ -395,7 +378,6 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" }, "filter": "metric.type=\"custom.googleapis.com/dynamic_routes_per_network_utilization\" resource.type=\"global\"" @@ -452,7 +434,7 @@ }, "width": 6, "xPos": 0, - "yPos": 24 + "yPos": 32 }, { "height": 4, @@ -492,7 +474,7 @@ }, "width": 6, "xPos": 6, - "yPos": 24 + "yPos": 32 }, { "height": 4, @@ -512,7 +494,6 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "60s", - 
"crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" }, "filter": "metric.type=\"custom.googleapis.com/firewall_policy_tuples_per_policy_utilization\" resource.type=\"global\"" @@ -528,7 +509,7 @@ } }, "width": 6, - "xPos": 0, + "xPos": 6, "yPos": 28 }, { @@ -549,7 +530,6 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" }, "filter": "metric.type=\"custom.googleapis.com/ip_addresses_per_subnet_utilization\" resource.type=\"global\"" @@ -586,7 +566,6 @@ "timeSeriesFilter": { "aggregation": { "alignmentPeriod": "60s", - "crossSeriesReducer": "REDUCE_NONE", "perSeriesAligner": "ALIGN_MEAN" }, "filter": "metric.type=\"custom.googleapis.com/dynamic_routes_per_peering_group_utilization\" resource.type=\"global\"" @@ -604,6 +583,124 @@ "width": 6, "xPos": 6, "yPos": 20 + }, + { + "height": 4, + "widget": { + "title": "static_routes_per_project_vpc_usage", + "xyChart": { + "chartOptions": { + "mode": "COLOR" + }, + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "apiSource": "DEFAULT_CLOUD", + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "crossSeriesReducer": "REDUCE_SUM", + "groupByFields": [ + "metric.label.\"project\"" + ], + "perSeriesAligner": "ALIGN_MEAN" + }, + "filter": "metric.type=\"custom.googleapis.com/static_routes_per_project_vpc_usage\" resource.type=\"global\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_NONE" + } + } + } + } + ], + "thresholds": [], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + "width": 6, + "xPos": 0, + "yPos": 24 + }, + { + "height": 4, + "widget": { + "title": "static_routes_per_ppg_utilization", + "xyChart": { + "chartOptions": { + "mode": "COLOR" + }, + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "apiSource": "DEFAULT_CLOUD", + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MEAN" + }, + "filter": "metric.type=\"custom.googleapis.com/static_routes_per_peering_group_utilization\" resource.type=\"global\"" + } + } + } + ], + "thresholds": [], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + "width": 6, + "xPos": 0, + "yPos": 28 + }, + { + "height": 4, + "widget": { + "title": "static_routes_per_project_utilization", + "xyChart": { + "chartOptions": { + "mode": "COLOR" + }, + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "apiSource": "DEFAULT_CLOUD", + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MEAN" + }, + "filter": "metric.type=\"custom.googleapis.com/static_routes_per_project_utilization\" resource.type=\"global\"" + } + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + "width": 6, + "xPos": 6, + "yPos": 24 } ] } diff --git a/blueprints/cloud-operations/network-dashboard/main.tf b/blueprints/cloud-operations/network-dashboard/main.tf index 00ca7bb2..b5edfb5c 100644 --- a/blueprints/cloud-operations/network-dashboard/main.tf +++ b/blueprints/cloud-operations/network-dashboard/main.tf @@ -50,7 +50,6 @@ module "service-account-function" { # Required IAM permissions for this service account are: # 1) 
compute.networkViewer on projects to be monitored (I gave it at organization level for now for simplicity) # 2) monitoring viewer on the projects to be monitored (I gave it at organization level for now for simplicity) - # 3) if you dont have permission to create service account and assign permission at organization Level, move these 3 roles to project level. iam_organization_roles = { "${var.organization_id}" = [ @@ -184,4 +183,4 @@ module "cloud-function" { resource "google_monitoring_dashboard" "dashboard" { dashboard_json = file("${path.module}/dashboards/quotas-utilization.json") project = local.monitoring_project -} \ No newline at end of file +} diff --git a/modules/organization-policy/versions.tf b/blueprints/cloud-operations/network-dashboard/versions.tf similarity index 91% rename from modules/organization-policy/versions.tf rename to blueprints/cloud-operations/network-dashboard/versions.tf index b1c8c910..3bdf2337 100644 --- a/modules/organization-policy/versions.tf +++ b/blueprints/cloud-operations/network-dashboard/versions.tf @@ -17,13 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } - - diff --git a/blueprints/cloud-operations/onprem-sa-key-management/versions.tf b/blueprints/cloud-operations/onprem-sa-key-management/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/onprem-sa-key-management/versions.tf +++ b/blueprints/cloud-operations/onprem-sa-key-management/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/packer-image-builder/versions.tf b/blueprints/cloud-operations/packer-image-builder/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/packer-image-builder/versions.tf +++ b/blueprints/cloud-operations/packer-image-builder/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/quota-monitoring/versions.tf b/blueprints/cloud-operations/quota-monitoring/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/quota-monitoring/versions.tf +++ b/blueprints/cloud-operations/quota-monitoring/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/scheduled-asset-inventory-export-bq/versions.tf b/blueprints/cloud-operations/scheduled-asset-inventory-export-bq/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/cloud-operations/scheduled-asset-inventory-export-bq/versions.tf +++ b/blueprints/cloud-operations/scheduled-asset-inventory-export-bq/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - 
= 4.36">
version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/README.md new file mode 100644 index 00000000..4bb282c5 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/README.md @@ -0,0 +1,115 @@ +# Configuring workload identity federation for a Terraform Cloud/Enterprise workflow + +The most common way to use Terraform Cloud for GCP deployments is to store a GCP service account key as part of the TFE workflow configuration. This carries a security risk, since keys are long-term credentials that could be compromised. + +Workload identity federation enables applications running outside of Google Cloud to replace long-lived service account keys with short-lived access tokens. This is achieved by configuring Google Cloud to trust an external identity provider, so applications can use the credentials issued by the external identity provider to impersonate a service account. + +This blueprint shows how to set up [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) between a [Terraform Cloud/Enterprise](https://developer.hashicorp.com/terraform/enterprise) instance and Google Cloud. This is done by configuring workload identity federation to trust OIDC tokens generated for a specific workflow in a Terraform Enterprise organization. + +The following diagram illustrates how the workflow gets a short-lived access token and uses it to access a resource: + + ![Sequence diagram](diagram.png) + +## Running the blueprint + +### Create a Terraform Enterprise workspace +If you don't have an existing Terraform Enterprise organization you can sign up for a [free trial](https://app.terraform.io/public/signup/account) account. + +Create a new workspace for a `CLI-driven workflow` (identity federation works with any workflow type, but for simplicity this blueprint uses a CLI-driven workflow). + +Note the workspace name and id (the id starts with `ws-`); we will use them at a later stage. + +Go to the organization settings and note the org name and id (the id starts with `org-`). + +### Deploy GCP Workload Identity Pool Provider for Terraform Enterprise + +> **_NOTE:_** This is a preparation step and should be executed by a user with sufficient permissions. + +Required permissions when a new project is created: + - Project Creator on the parent folder/org. + + Required permissions when an existing project is used: + - Workload Identity Admin on the project level + - Project IAM Admin on the project level + +Fill out the required variables, using the TFE organization and workspace IDs from the previous steps (the IDs, not the names). +```bash +cd gcp-workload-identity-provider + +mv terraform.auto.tfvars.template terraform.auto.tfvars + +vi terraform.auto.tfvars +``` + +Authenticate using application default credentials, then run Terraform to deploy the resources: +``` +gcloud auth application-default login + +terraform init + +terraform apply +``` + +As a result, a set of outputs will be provided (your values will be different); note them, since we will use them in the next steps.
+ +``` +impersonate_service_account_email = "sa-tfe@fe-test-oidc.iam.gserviceaccount.com" +project_id = "tfe-test-oidc" +workload_identity_audience = "//iam.googleapis.com/projects/476538149566/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +workload_identity_pool_provider_id = "projects/476538149566/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +``` + +### Configure OIDC provider for your TFE Workflow + +To enable OIDC for a TFE workflow, it's enough to set the `TFC_WORKLOAD_IDENTITY_AUDIENCE` environment variable. + +Go to the Workflow -> Variables and add a new variable `TFC_WORKLOAD_IDENTITY_AUDIENCE` equal to the value of the `workload_identity_audience` output; in our example it's: + +``` +TFC_WORKLOAD_IDENTITY_AUDIENCE = "//iam.googleapis.com/projects/476538149566/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +``` + +At this point GCP workload identity federation is set up to trust TFE-generated OIDC tokens, so the TFE workflow can use the token to impersonate a GCP service account. + +## Testing the blueprint + +To test the setup, we will deploy a GCS bucket from the TFE workflow, using the OIDC token for service account impersonation. + +### Configure backend and variables + +First, configure the TFE remote backend for the test Terraform code, using the TFE organization name and workspace name (the names, not the IDs): + +``` +cd ../tfc-workflow-using-wif + +mv backend.tf.template backend.tf + + +vi backend.tf + +``` + +Fill out the variables based on the output from the preparation steps: + +``` +mv terraform.auto.tfvars.template terraform.auto.tfvars + +vi terraform.auto.tfvars + +``` + +### Authenticate terraform for triggering the CLI-driven workflow + +Follow this [documentation](https://learn.hashicorp.com/tutorials/terraform/cloud-login) to log in to Terraform Cloud from the CLI. + +### Trigger the workflow + +``` +terraform init + +terraform apply +``` + +As a result, a GCS bucket is deployed from the Terraform Enterprise workflow using Workload Identity Federation. + +Once done testing, you can clean up resources by running `terraform destroy` first in the `tfc-workflow-using-wif` folder and then in the `gcp-workload-identity-provider` folder. diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/diagram.png b/blueprints/cloud-operations/terraform-enterprise-wif/diagram.png new file mode 100644 index 00000000..d4e6f82e Binary files /dev/null and b/blueprints/cloud-operations/terraform-enterprise-wif/diagram.png differ diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/README.md new file mode 100644 index 00000000..35198e8d --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/README.md @@ -0,0 +1,33 @@ +# GCP Workload Identity Provider for Terraform Enterprise + +This terraform code is part of the [GCP Workload Identity Federation for Terraform Enterprise](../) blueprint. + +The codebase provisions the following list of resources: + +- GCP project (optional) +- Workload identity pool and workload identity pool provider +- Service account to be impersonated by the TFE workflow + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [billing_account](variables.tf#L16) | Billing account id used as default for new projects. | string | ✓ | | +| [project_id](variables.tf#L43) | Existing project id. | string | ✓ | | +| [tfe_organization_id](variables.tf#L48) | TFE organization id.
| string | ✓ | | +| [tfe_workspace_id](variables.tf#L53) | TFE workspace id. | string | ✓ | | +| [issuer_uri](variables.tf#L21) | Terraform Enterprise uri. Replace the uri if a self hosted instance is used. | string | | "https://app.terraform.io/" | +| [parent](variables.tf#L27) | Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format. | string | | null | +| [project_create](variables.tf#L37) | Create project instead of using an existing one. | bool | | true | +| [workload_identity_pool_id](variables.tf#L58) | Workload identity pool id. | string | | "tfe-pool" | +| [workload_identity_pool_provider_id](variables.tf#L64) | Workload identity pool provider id. | string | | "tfe-provider" | + +## Outputs + +| name | description | sensitive | +|---|---|:---:| +| [impersonate_service_account_email](outputs.tf#L16) | Service account to be impersonated by workload identity. | | +| [project_id](outputs.tf#L21) | GCP Project ID. | | +| [workload_identity_audience](outputs.tf#L26) | TFC Workload Identity Audience. | | +| [workload_identity_pool_provider_id](outputs.tf#L31) | GCP workload identity pool provider ID. | | + + diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/main.tf b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/main.tf new file mode 100644 index 00000000..5ced2e3c --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/main.tf @@ -0,0 +1,91 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +############################################################################### +# GCP PROJECT # +############################################################################### + +module "project" { + source = "../../../../modules/project" + name = var.project_id + project_create = var.project_create + parent = var.parent + billing_account = var.billing_account + services = [ + "iam.googleapis.com", + "cloudresourcemanager.googleapis.com", + "iamcredentials.googleapis.com", + "sts.googleapis.com", + "storage.googleapis.com" + ] +} + +############################################################################### +# Workload Identity Pool and Provider # +############################################################################### + +resource "google_iam_workload_identity_pool" "tfe-pool" { + project = module.project.project_id + workload_identity_pool_id = var.workload_identity_pool_id + display_name = "TFE Pool" + description = "Identity pool for Terraform Enterprise OIDC integration" +} + +resource "google_iam_workload_identity_pool_provider" "tfe-pool-provider" { + project = module.project.project_id + workload_identity_pool_id = google_iam_workload_identity_pool.tfe-pool.workload_identity_pool_id + workload_identity_pool_provider_id = var.workload_identity_pool_provider_id + display_name = "TFE Pool Provider" + description = "OIDC identity pool provider for TFE Integration" + # Use condition to make sure only token generated for a specific TFE Org can be used across org workspaces + attribute_condition = "attribute.terraform_organization_id == \"${var.tfe_organization_id}\"" + attribute_mapping = { + "google.subject" = "assertion.sub" + "attribute.aud" = "assertion.aud" + "attribute.terraform_run_phase" = "assertion.terraform_run_phase" + "attribute.terraform_workspace_id" = "assertion.terraform_workspace_id" + "attribute.terraform_workspace_name" = "assertion.terraform_workspace_name" + "attribute.terraform_organization_id" = "assertion.terraform_organization_id" + "attribute.terraform_organization_name" = "assertion.terraform_organization_name" + "attribute.terraform_run_id" = "assertion.terraform_run_id" + "attribute.terraform_full_workspace" = "assertion.terraform_full_workspace" + } + oidc { + # Should be different if self hosted TFE instance is used + issuer_uri = var.issuer_uri + } +} + +############################################################################### +# Service Account and IAM bindings # +############################################################################### + +module "sa-tfe" { + source = "../../../../modules/iam-service-account" + project_id = module.project.project_id + name = "sa-tfe" + + iam = { + # We allow only tokens generated by a specific TFE workspace impersonation of the service account, + # that way one identity pool can be used for a TFE Organization, but every workspace will be able to impersonate only a specifc SA + "roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.tfe-pool.name}/attribute.terraform_workspace_id/${var.tfe_workspace_id}"] + } + + iam_project_roles = { + "${module.project.project_id}" = [ + "roles/storage.admin" + ] + } +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/outputs.tf b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/outputs.tf new file mode 100644 index 00000000..46d7f6b0 --- /dev/null +++ 
b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/outputs.tf @@ -0,0 +1,34 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +output "impersonate_service_account_email" { + description = "Service account to be impersonated by workload identity." + value = module.sa-tfe.email +} + +output "project_id" { + description = "GCP Project ID." + value = module.project.project_id +} + +output "workload_identity_audience" { + description = "TFC Workload Identity Audience." + value = "//iam.googleapis.com/${google_iam_workload_identity_pool_provider.tfe-pool-provider.name}" +} + +output "workload_identity_pool_provider_id" { + description = "GCP workload identity pool provider ID." + value = google_iam_workload_identity_pool_provider.tfe-pool-provider.name +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/terraform.auto.tfvars.template b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/terraform.auto.tfvars.template new file mode 100644 index 00000000..645eea0b --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/terraform.auto.tfvars.template @@ -0,0 +1,20 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +parent = "folders/437102807785" +project_id = "my-project-id" +tfe_organization_id = "org-W3bz9neazHrZz99U" +tfe_workspace_id = "ws-DFxEE3NmeMdaAvoK" +billing_account = "015617-1B8CBC-AF10D9" diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/variables.tf b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/variables.tf new file mode 100644 index 00000000..3719b183 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider/variables.tf @@ -0,0 +1,68 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +variable "billing_account" { + description = "Billing account id used as default for new projects." + type = string +} + +variable "issuer_uri" { + description = "Terraform Enterprise uri. Replace the uri if a self hosted instance is used." + type = string + default = "https://app.terraform.io/" +} + +variable "parent" { + description = "Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format." + type = string + default = null + validation { + condition = var.parent == null || can(regex("(organizations|folders)/[0-9]+", var.parent)) + error_message = "Parent must be of the form folders/folder_id or organizations/organization_id." + } +} + +variable "project_create" { + description = "Create project instead of using an existing one." + type = bool + default = true +} + +variable "project_id" { + description = "Existing project id." + type = string +} + +variable "tfe_organization_id" { + description = "TFE organization id." + type = string +} + +variable "tfe_workspace_id" { + description = "TFE workspace id." + type = string +} + +variable "workload_identity_pool_id" { + description = "Workload identity pool id." + type = string + default = "tfe-pool" +} + +variable "workload_identity_pool_provider_id" { + description = "Workload identity pool provider id." + type = string + default = "tfe-provider" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/README.md new file mode 100644 index 00000000..9be8a09b --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/README.md @@ -0,0 +1,18 @@ +# GCP Workload Identity Provider for Terraform Enterprise + +This terraform code is a part of [GCP Workload Identity Federation for Terraform Enterprise](../) blueprint. For instructions please refer to the blueprint [readme](../README.md). + +The codebase provisions the following list of resources: + +- GCS Bucket + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [impersonate_service_account_email](variables.tf#L21) | Service account to be impersonated by workload identity. | string | ✓ | | +| [project_id](variables.tf#L16) | GCP project ID. | string | ✓ | | +| [workload_identity_pool_provider_id](variables.tf#L26) | GCP workload identity pool provider ID. | string | ✓ | | + + diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/backend.tf.template b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/backend.tf.template new file mode 100644 index 00000000..87d4737d --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/backend.tf.template @@ -0,0 +1,29 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# The block below configures Terraform to use the 'remote' backend with Terraform Cloud. 
+# For more information, see https://www.terraform.io/docs/backends/types/remote.html + +terraform { + backend "remote" { + organization = "" + + workspaces { + name = "" + } + } + + required_version = ">= 0.14.0" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/main.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/main.tf new file mode 100644 index 00000000..5e03ada5 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/main.tf @@ -0,0 +1,25 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +############################################################################### +# TEST RESOURCE TO VALIDATE WIF # +############################################################################### + +resource "google_storage_bucket" "test-bucket" { + project = var.project_id + name = "${var.project_id}-tfe-oidc-test-bucket" + location = "US" + force_destroy = true +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/provider.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/provider.tf new file mode 100644 index 00000000..47f24620 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/provider.tf @@ -0,0 +1,25 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +module "tfe_oidc" { + source = "./tfc-oidc" + + workload_identity_pool_provider_id = var.workload_identity_pool_provider_id + impersonate_service_account_email = var.impersonate_service_account_email +} + +provider "google" { + credentials = module.tfe_oidc.credentials +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/terraform.auto.tfvars.template b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/terraform.auto.tfvars.template new file mode 100644 index 00000000..efea4cc9 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/terraform.auto.tfvars.template @@ -0,0 +1,17 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project_id = "tfe-oidc-workflow" +workload_identity_pool_provider_id = "projects/683987109094/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" +impersonate_service_account_email = "sa-tfe@tfe-oidc-workflow2.iam.gserviceaccount.com" diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/README.md b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/README.md new file mode 100644 index 00000000..bb8d7983 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/README.md @@ -0,0 +1,40 @@ +# Terraform Enterprise OIDC Credential for GCP Workload Identity Federation + +This is a helper module to prepare GCP Credentials from Terraform Enterprise workload identity token. For more information see [Terraform Enterprise Workload Identity Federation](../) blueprint. + +## Example +```hcl +module "tfe_oidc" { + source = "./tfe_oidc" + + workload_identity_pool_provider_id = "projects/683987109094/locations/global/workloadIdentityPools/tfe-pool/providers/tfe-provider" + impersonate_service_account_email = "tfe-test@tfe-test-wif.iam.gserviceaccount.com" +} + +provider "google" { + credentials = module.tfe_oidc.credentials +} + +provider "google-beta" { + credentials = module.tfe_oidc.credentials +} + +# tftest skip +``` + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [impersonate_service_account_email](variables.tf#L22) | Service account to be impersonated by workload identity federation. | string | ✓ | | +| [workload_identity_pool_provider_id](variables.tf#L17) | GCP workload identity pool provider ID. | string | ✓ | | +| [tmp_oidc_token_path](variables.tf#L27) | Name of the temporary file where TFC OIDC token will be stored to authentificate terraform provider google. | string | | ".oidc_token" | + +## Outputs + +| name | description | sensitive | +|---|---|:---:| +| [credentials](outputs.tf#L17) | | | + + diff --git a/tests/modules/organization_policy/fixture/main.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/main.tf similarity index 74% rename from tests/modules/organization_policy/fixture/main.tf rename to blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/main.tf index 09a09267..2c510a6a 100644 --- a/tests/modules/organization_policy/fixture/main.tf +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/main.tf @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -module "org-policy" { - source = "../../../../modules/organization-policy" - - config_directory = var.config_directory - policies = var.policies +locals { + audience = "//iam.googleapis.com/${var.workload_identity_pool_provider_id}" +} + +data "external" "oidc_token_file" { + program = ["bash", "${path.module}/write_token.sh", "${var.tmp_oidc_token_path}"] } diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/outputs.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/outputs.tf new file mode 100644 index 00000000..fbcea8c2 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/outputs.tf @@ -0,0 +1,26 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "credentials" { + value = jsonencode({ + "type" : "external_account", + "audience" : "${local.audience}", + "subject_token_type" : "urn:ietf:params:oauth:token-type:jwt", + "token_url" : "https://sts.googleapis.com/v1/token", + "credential_source" : data.external.oidc_token_file.result + "service_account_impersonation_url" : "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/${var.impersonate_service_account_email}:generateAccessToken" + }) +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/variables.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/variables.tf new file mode 100644 index 00000000..06f310da --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/variables.tf @@ -0,0 +1,31 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "workload_identity_pool_provider_id" { + description = "GCP workload identity pool provider ID." + type = string +} + +variable "impersonate_service_account_email" { + description = "Service account to be impersonated by workload identity federation." + type = string +} + +variable "tmp_oidc_token_path" { + description = "Name of the temporary file where TFC OIDC token will be stored to authentificate terraform provider google." 
+ type = string + default = ".oidc_token" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/versions.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/versions.tf new file mode 100644 index 00000000..a079e99c --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/versions.tf @@ -0,0 +1,17 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +terraform { + required_version = ">= 1.3.1" +} diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/write_token.sh b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/write_token.sh new file mode 100644 index 00000000..2f7e30a2 --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/tfc-oidc/write_token.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Exit if any of the intermediate steps fail +set -e + +FILENAME=$@ + +echo $TFC_WORKLOAD_IDENTITY_TOKEN > $FILENAME + +echo -n "{\"file\":\"${FILENAME}\"}" diff --git a/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/variables.tf b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/variables.tf new file mode 100644 index 00000000..3a1d81dc --- /dev/null +++ b/blueprints/cloud-operations/terraform-enterprise-wif/tfc-workflow-using-wif/variables.tf @@ -0,0 +1,29 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +variable "project_id" { + description = "GCP project ID." + type = string +} + +variable "impersonate_service_account_email" { + description = "Service account to be impersonated by workload identity." + type = string +} + +variable "workload_identity_pool_provider_id" { + description = "GCP workload identity pool provider ID." 
+ type = string +} diff --git a/blueprints/cloud-operations/workload-identity-federation/README.md b/blueprints/cloud-operations/workload-identity-federation/README.md index fb990342..ad6feaed 100644 --- a/blueprints/cloud-operations/workload-identity-federation/README.md +++ b/blueprints/cloud-operations/workload-identity-federation/README.md @@ -1,9 +1,9 @@ -# Configuring workload identity federation to access Google Cloud resources from apps running on Azure +# Configuring Workload Identity Federation to access Google Cloud resources from apps running on Azure The most straightforward way for workloads running outside of Google Cloud to call Google Cloud APIs is by using a downloaded service account key. However, this approach has 2 major pain points: * A management hassle, keys need to be stored securely and rotated often. -* A security risk, keys are long term credentials that could be compromised. +* A security risk, keys are long term credentials that could be compromised. Workload identity federation enables applications running outside of Google Cloud to replace long-lived service account keys with short-lived access tokens. This is achieved by configuring Google Cloud to trust an external identity provider, so applications can use the credentials issued by the external identity provider to impersonate a service account. @@ -19,17 +19,17 @@ The provided terraform configuration will set up the following architecture: * On Azure: - * An Azure Active Directory application and a service principal. By default, the new application grants all users in the Azure AD tenant permission to obtain access tokens. So an app role assignment will be required to restrict which identities can obtain access tokens for the application. + * An Azure Active Directory application and a service principal. By default, the new application grants all users in the Azure AD tenant permission to obtain access tokens. So an app role assignment will be required to restrict which identities can obtain access tokens for the application. - * Optionally, all the resources required to have a VM configured to run with a system-assigned managed identity and accessible via SSH on a public IP using public key authentication, so we can log in to the machine and run the `gcloud` command to verify that everything works as expected. + * Optionally, all the resources required to have a VM configured to run with a system-assigned managed identity and accessible via SSH on a public IP using public key authentication, so we can log in to the machine and run the `gcloud` command to verify that everything works as expected. * On Google Cloud: - * A Google Cloud project with: + * A Google Cloud project with: - * A workload identity pool and provider configured to trust the AAD application + * A workload identity pool and provider configured to trust the AAD application - * A service account with the Viewer role granted on the project. The external identities in the workload identity pool would be assigned the Workload Identity User role on that service account. + * A service account with the Viewer role granted on the project. The external identities in the workload identity pool would be assigned the Workload Identity User role on that service account. ## Running the blueprint @@ -42,7 +42,7 @@ Clone this repository or [open it in cloud shell](https://ssh.cloud.google.com/c Once the resources have been created, do the following to verify that everything works as expected: -1. Log in to the VM. +1. Log in to the VM. 
If you have created the VM using this terraform configuration proceed the following way: @@ -72,7 +72,6 @@ Once the resources have been created, do the following to verify that everything `gcloud projects describe PROJECT_ID` - Once done testing, you can clean up resources by running `terraform destroy`. diff --git a/blueprints/data-solutions/README.md b/blueprints/data-solutions/README.md index 968d7b9c..4919f29a 100644 --- a/blueprints/data-solutions/README.md +++ b/blueprints/data-solutions/README.md @@ -6,32 +6,32 @@ They are meant to be used as minimal but complete starting points to create actu ## Blueprints +### Cloud SQL instance with multi-region read replicas + + +This [blueprint](./cloudsql-multiregion/) creates a [Cloud SQL instance](https://cloud.google.com/sql) with multi-region read replicas as described in the [Cloud SQL for PostgreSQL disaster recovery](https://cloud.google.com/architecture/cloud-sql-postgres-disaster-recovery-complete-failover-fallback) article. + +
+ ### GCE and GCS CMEK via centralized Cloud KMS This [blueprint](./cmek-via-centralized-kms/) implements [CMEK](https://cloud.google.com/kms/docs/cmek) for GCS and GCE, via keys hosted in KMS running in a centralized project. The blueprint shows the basic resources and permissions for the typical use case of application projects implementing encryption at rest via a centrally managed KMS service. +
-### Cloud Storage to Bigquery with Cloud Dataflow with least privileges +### Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key + + +This [blueprint](./composer-2/) creates a [Cloud Composer](https://cloud.google.com/composer/) version 2 instance on a VPC with a dedicated service account. The solution supports as inputs: a Shared VPC and Cloud KMS CMEK keys. - This [blueprint](./gcs-to-bq-with-least-privileges/) implements resources required to run GCS to BigQuery Dataflow pipelines. The solution rely on a set of Services account created with the least privileges principle.
### Data Platform Foundations This [blueprint](./data-platform-foundations/) implements a robust and flexible Data Foundation on GCP that provides opinionated defaults, allowing customers to build and scale out additional data pipelines quickly and reliably. -
-### SQL Server Always On Availability Groups - - -This [blueprint](./data-platform-foundations/) implements SQL Server Always On Availability Groups using Fabric modules. It builds a two node cluster with a fileshare witness instance in an existing VPC and adds the necessary firewalling. The actual setup process (apart from Active Directory operations) has been scripted, so that least amount of manual works needs to performed. -
- -### Cloud SQL instance with multi-region read replicas - - -This [blueprint](./cloudsql-multiregion/) creates a [Cloud SQL instance](https://cloud.google.com/sql) with multi-region read replicas as described in the [Cloud SQL for PostgreSQL disaster recovery](https://cloud.google.com/architecture/cloud-sql-postgres-disaster-recovery-complete-failover-fallback) article.
### Data Playground starter with Cloud Vertex AI Notebook and GCS @@ -40,11 +40,18 @@ This [blueprint](./cloudsql-multiregion/) creates a [Cloud SQL instance](https:/ This [blueprint](./data-playground/) creates a [Vertex AI Notebook](https://cloud.google.com/vertex-ai/docs/workbench/introduction) running on a VPC with a private IP and a dedicated Service Account. A GCS bucket and a BigQuery dataset are created to store inputs and outputs of data experiments. +
-### Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key +### Cloud Storage to Bigquery with Cloud Dataflow with least privileges - -This [blueprint](./composer-2/) creates a [Cloud Composer](https://cloud.google.com/composer/) version 2 instance on a VPC with a dedicated service account. The solution supports as inputs: a Shared VPC and Cloud KMS CMEK keys. -
\ No newline at end of file + This [blueprint](./gcs-to-bq-with-least-privileges/) implements resources required to run GCS to BigQuery Dataflow pipelines. The solution relies on a set of service accounts created following the least privilege principle. + +
+ +### SQL Server Always On Availability Groups + + +This [blueprint](./sqlserver-alwayson/) implements SQL Server Always On Availability Groups using Fabric modules. It builds a two-node cluster with a fileshare witness instance in an existing VPC and adds the necessary firewalling. The actual setup process (apart from Active Directory operations) has been scripted, so that the least amount of manual work needs to be performed. + +
diff --git a/blueprints/data-solutions/cloudsql-multiregion/README.md b/blueprints/data-solutions/cloudsql-multiregion/README.md index babacd58..5bdc6329 100644 --- a/blueprints/data-solutions/cloudsql-multiregion/README.md +++ b/blueprints/data-solutions/cloudsql-multiregion/README.md @@ -39,7 +39,7 @@ If `project_create` is left to `null`, the identity performing the deployment ne Click on the image below, sign in if required and when the prompt appears, click on “confirm”. -[![Open Cloudshell](images/button.png)](https://goo.gle/GoCloudSQL) +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoCloudSQL) This will clone the repository to your cloud shell and a screen like this one will appear: @@ -81,7 +81,8 @@ This implementation is intentionally minimal and easy to read. A real world use - Using VPC-SC to mitigate data exfiltration ### Shared VPC -The example supports the configuration of a Shared VPC as an input variable. + +The example supports the configuration of a Shared VPC as an input variable. To deploy the solution on a Shared VPC, you have to configure the `network_config` variable: ``` @@ -94,12 +95,14 @@ network_config = { ``` To run this example, the Shared VPC project needs to have: - - A Private Service Connect with a range of `/24` (example: `10.60.0.0/24`) to deploy the Cloud SQL instance. - - Internet access configured (for example Cloud NAT) to let the Test VM download packages. + +- A Private Service Connect with a range of `/24` (example: `10.60.0.0/24`) to deploy the Cloud SQL instance. +- Internet access configured (for example Cloud NAT) to let the Test VM download packages. In order to run the example and deploy Cloud SQL on a shared VPC the identity running Terraform must have the following IAM role on the Shared VPC Host project. 
- - Compute Network Admin (roles/compute.networkAdmin) - - Compute Shared VPC Admin (roles/compute.xpnAdmin) + +- Compute Network Admin (roles/compute.networkAdmin) +- Compute Shared VPC Admin (roles/compute.xpnAdmin) ## Test your environment diff --git a/blueprints/data-solutions/cloudsql-multiregion/images/button.png b/blueprints/data-solutions/cloudsql-multiregion/images/button.png deleted file mode 100644 index 21a3f3de..00000000 Binary files a/blueprints/data-solutions/cloudsql-multiregion/images/button.png and /dev/null differ diff --git a/blueprints/data-solutions/cmek-via-centralized-kms/versions.tf b/blueprints/data-solutions/cmek-via-centralized-kms/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/data-solutions/cmek-via-centralized-kms/versions.tf +++ b/blueprints/data-solutions/cmek-via-centralized-kms/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/data-solutions/data-platform-foundations/03-orchestration.tf b/blueprints/data-solutions/data-platform-foundations/03-orchestration.tf index 7519fa8a..2990a2c5 100644 --- a/blueprints/data-solutions/data-platform-foundations/03-orchestration.tf +++ b/blueprints/data-solutions/data-platform-foundations/03-orchestration.tf @@ -67,8 +67,10 @@ module "orch-project" { "roles/storage.objectViewer" = [module.load-sa-df-0.iam_email] } oslogin = false - policy_boolean = { - "constraints/compute.requireOsLogin" = false + org_policies = { + "constraints/compute.requireOsLogin" = { + enforce = false + } } services = concat(var.project_services, [ "artifactregistry.googleapis.com", @@ -82,6 +84,7 @@ module "orch-project" { "container.googleapis.com", "containerregistry.googleapis.com", "dataflow.googleapis.com", + "orgpolicy.googleapis.com", "pubsub.googleapis.com", "servicenetworking.googleapis.com", "storage.googleapis.com", diff --git a/blueprints/data-solutions/data-platform-foundations/README.md b/blueprints/data-solutions/data-platform-foundations/README.md index 034bb32a..51545d58 100644 --- a/blueprints/data-solutions/data-platform-foundations/README.md +++ b/blueprints/data-solutions/data-platform-foundations/README.md @@ -160,9 +160,10 @@ You can find more details and best practices on using DLP to De-identification a [Data Catalog](https://cloud.google.com/data-catalog) helps you to document your data entry at scale. Data Catalog relies on [tags](https://cloud.google.com/data-catalog/docs/tags-and-tag-templates#tags) and [tag template](https://cloud.google.com/data-catalog/docs/tags-and-tag-templates#tag-templates) to manage metadata for all data entries in a unified and centralized service. To implement [column-level security](https://cloud.google.com/bigquery/docs/column-level-security-intro) on BigQuery, we suggest to use `Tags` and `Tag templates`. The default configuration will implement 3 tags: - - `3_Confidential`: policy tag for columns that include very sensitive information, such as credit card numbers. - - `2_Private`: policy tag for columns that include sensitive personal identifiable information (PII) information, such as a person's first name. - - `1_Sensitive`: policy tag for columns that include data that cannot be made public, such as the credit limit. 
+ +- `3_Confidential`: policy tag for columns that include very sensitive information, such as credit card numbers. +- `2_Private`: policy tag for columns that include sensitive personal identifiable information (PII) information, such as a person's first name. +- `1_Sensitive`: policy tag for columns that include data that cannot be made public, such as the credit limit. Anything that is not tagged is available to all users who have access to the data warehouse. @@ -222,7 +223,7 @@ module "data-platform" { prefix = "myprefix" } -# tftest modules=42 resources=315 +# tftest modules=42 resources=316 ``` ## Customizations @@ -238,7 +239,7 @@ To do this, you need to remove IAM binging at project-level for the `data-analys ## Demo pipeline -The application layer is out of scope of this script. As a demo purpuse only, several Cloud Composer DAGs are provided. Demos will import data from the `drop off` area to the `Data Warehouse Confidential` dataset suing different features. +The application layer is out of scope of this script. As a demo purpuse only, several Cloud Composer DAGs are provided. Demos will import data from the `drop off` area to the `Data Warehouse Confidential` dataset suing different features. You can find examples in the `[demo](./demo)` folder. diff --git a/blueprints/data-solutions/data-playground/main.tf b/blueprints/data-solutions/data-playground/main.tf index 2bcd69ab..bcdea5df 100644 --- a/blueprints/data-solutions/data-playground/main.tf +++ b/blueprints/data-solutions/data-playground/main.tf @@ -35,13 +35,16 @@ module "project" { "dataflow.googleapis.com", "ml.googleapis.com", "notebooks.googleapis.com", + "orgpolicy.googleapis.com", "servicenetworking.googleapis.com", "stackdriver.googleapis.com", "storage.googleapis.com", "storage-component.googleapis.com" ] - policy_boolean = { - # "constraints/compute.requireOsLogin" = false + org_policies = { + # "constraints/compute.requireOsLogin" = { + # enforce = false + # } # Example of applying a project wide policy, mainly useful for Composer } service_encryption_key_ids = { diff --git a/blueprints/data-solutions/data-playground/versions.tf b/blueprints/data-solutions/data-playground/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/data-solutions/data-playground/versions.tf +++ b/blueprints/data-solutions/data-playground/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md index 6025ad7f..915ada21 100644 --- a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md +++ b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/README.md @@ -60,8 +60,7 @@ __Note__: To grant a user a role, take a look at the [Granting and Revoking Acce Click on the button below, sign in if required and when the prompt appears, click on “confirm”. 
- -[![Open Cloudshell](images/shell_button.png)](https://goo.gle/GoDataPipe) +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoDataPipe) This will clone the repository to your cloud shell and a screen like this one will appear: @@ -146,13 +145,13 @@ Once this is done, the 3 files necessary to run the Dataflow Job will have been Run the following command to start the dataflow job: - gcloud --impersonate-service-account=orchestrator@$SERVICE_PROJECT_ID.iam.gserviceaccount.com dataflow jobs run test_batch_01 \ + gcloud --impersonate-service-account=orchestrator@$SERVICE_PROJECT_ID.iam.gserviceaccount.com dataflow jobs run test_batch_01 \ --gcs-location gs://dataflow-templates/latest/GCS_Text_to_BigQuery \ --project $SERVICE_PROJECT_ID \ --region europe-west1 \ --disable-public-ips \ --subnetwork https://www.googleapis.com/compute/v1/projects/$SERVICE_PROJECT_ID/regions/europe-west1/subnetworks/subnet \ - --staging-location gs://$PREFIX-df-tmp\ + --staging-location gs://$PREFIX-df-tmp \ --service-account-email df-loading@$SERVICE_PROJECT_ID.iam.gserviceaccount.com \ --parameters \ javascriptTextTransformFunctionName=transform,\ diff --git a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/versions.tf b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/versions.tf +++ b/blueprints/data-solutions/gcs-to-bq-with-least-privileges/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/data-solutions/sqlserver-alwayson/main.tf b/blueprints/data-solutions/sqlserver-alwayson/main.tf index 3d391c5f..6485b46d 100644 --- a/blueprints/data-solutions/sqlserver-alwayson/main.tf +++ b/blueprints/data-solutions/sqlserver-alwayson/main.tf @@ -13,25 +13,54 @@ # limitations under the License. locals { - prefix = var.prefix != "" ? format("%s-", var.prefix) : "" - vpc_project = var.shared_vpc_project_id != null ? var.shared_vpc_project_id : module.project.project_id - - network = module.vpc.self_link - subnetwork = var.project_create != null ? module.vpc.subnet_self_links[format("%s/%s", var.region, var.subnetwork)] : data.google_compute_subnetwork.subnetwork[0].self_link - - node_base = format("%s%s", local.prefix, var.node_name) - node_prefix = length(local.node_base) > 12 ? substr(local.node_base, 0, 12) : local.node_base - node_netbios_names = [for idx in range(1, 3) : format("%s-%02d", local.node_prefix, idx)] - witness_name = format("%s%s", local.prefix, var.witness_name) - witness_netbios_name = length(local.witness_name) > 15 ? substr(local.witness_name, 0, 15) : local.witness_name - zones = var.project_create == null ? data.google_compute_zones.zones[0].names : formatlist("${var.region}-%s", ["a", "b", "c"]) - node_zones = merge({ for idx, node_name in local.node_netbios_names : node_name => local.zones[idx] }, - { (local.witness_netbios_name) = local.zones[length(local.zones) - 1] }) - - cluster_full_name = format("%s%s", local.prefix, var.cluster_name) - cluster_netbios_name = length(local.cluster_full_name) > 15 ? 
substr(local.cluster_full_name, 0, 15) : local.cluster_full_name - - ad_user_password_secret = format("%s%s-password", local.prefix, var.cluster_name) + ad_user_password_secret = "${local.cluster_full_name}-password" + cluster_full_name = "${local.prefix}${var.cluster_name}" + cluster_netbios_name = ( + length(local.cluster_full_name) > 15 + ? substr(local.cluster_full_name, 0, 15) + : local.cluster_full_name + ) + network = module.vpc.self_link + node_base = "${local.prefix}${var.node_name}" + node_prefix = ( + length(local.node_base) > 12 + ? substr(local.node_base, 0, 12) + : local.node_base + ) + node_netbios_names = [ + for idx in range(1, 3) : format("%s-%02d", local.node_prefix, idx) + ] + node_zones = merge( + { + for idx, node_name in local.node_netbios_names : + node_name => local.zones[idx] + }, + { + (local.witness_netbios_name) = local.zones[length(local.zones) - 1] + } + ) + prefix = var.prefix != "" ? "${var.prefix}-" : "" + subnetwork = ( + var.project_create != null + ? module.vpc.subnet_self_links["${var.region}/${var.subnetwork}"] + : data.google_compute_subnetwork.subnetwork[0].self_link + ) + vpc_project = ( + var.shared_vpc_project_id != null + ? var.shared_vpc_project_id + : module.project.project_id + ) + witness_name = "${local.prefix}${var.witness_name}" + witness_netbios_name = ( + length(local.witness_name) > 15 + ? substr(local.witness_name, 0, 15) + : local.witness_name + ) + zones = ( + var.project_create == null + ? data.google_compute_zones.zones[0].names + : formatlist("${var.region}-%s", ["a", "b", "c"]) + ) } module "project" { diff --git a/blueprints/data-solutions/sqlserver-alwayson/vpc.tf b/blueprints/data-solutions/sqlserver-alwayson/vpc.tf index dbbf38a7..d3e816cb 100644 --- a/blueprints/data-solutions/sqlserver-alwayson/vpc.tf +++ b/blueprints/data-solutions/sqlserver-alwayson/vpc.tf @@ -15,26 +15,36 @@ # tfdoc:file:description Creates the VPC and manages the firewall rules and ILB. locals { - listeners = { for aog in var.always_on_groups : format("%slb-%s", local.prefix, aog) => { - region = var.region - subnetwork = local.subnetwork + internal_addresses = merge( + local.listeners, + local.node_ips, + { + "${local.prefix}cluster" = { + region = var.region + subnetwork = local.subnetwork + } + (local.witness_netbios_name) = { + region = var.region + subnetwork = local.subnetwork + } } + ) + internal_address_ips = { + for k, v in module.ip-addresses.internal_addresses : + k => v.address } - node_ips = { for node_name in local.node_netbios_names : node_name => { - region = var.region - subnetwork = local.subnetwork - } - } - internal_addresses = merge({ - format("%scluster", local.prefix) = { + listeners = { + for aog in var.always_on_groups : "${local.prefix}lb-${aog}" => { region = var.region subnetwork = local.subnetwork } - (local.witness_netbios_name) = { + } + node_ips = { + for node_name in local.node_netbios_names : node_name => { region = var.region subnetwork = local.subnetwork } - }, local.listeners, local.node_ips) + } } data "google_compute_zones" "zones" { @@ -50,7 +60,6 @@ data "google_compute_subnetwork" "subnetwork" { region = var.region } -# Create VPC if required module "vpc" { source = "../../../modules/net-vpc" @@ -66,7 +75,6 @@ module "vpc" { vpc_create = var.project_create != null ? 
true : false } -# Firewall rules required for WSFC nodes module "firewall" { source = "../../../modules/net-vpc-firewall" project_id = local.vpc_project @@ -76,7 +84,7 @@ module "firewall" { https_source_ranges = [] ssh_source_ranges = [] custom_rules = { - format("%sallow-all-between-wsfc-nodes", local.prefix) = { + "${local.prefix}allow-all-between-wsfc-nodes" = { description = "Allow all between WSFC nodes" direction = "INGRESS" action = "allow" @@ -91,7 +99,7 @@ module "firewall" { ] extra_attributes = {} } - format("%sallow-all-between-wsfc-witness", local.prefix) = { + "${local.prefix}allow-all-between-wsfc-witness" = { description = "Allow all between WSFC witness nodes" direction = "INGRESS" action = "allow" @@ -106,7 +114,7 @@ module "firewall" { ] extra_attributes = {} } - format("%sallow-sql-to-wsfc-nodes", local.prefix) = { + "${local.prefix}allow-sql-to-wsfc-nodes" = { description = "Allow SQL connections to WSFC nodes" direction = "INGRESS" action = "allow" @@ -119,7 +127,7 @@ module "firewall" { ] extra_attributes = {} } - format("%sallow-health-check-to-wsfc-nodes", local.prefix) = { + "${local.prefix}allow-health-check-to-wsfc-nodes" = { description = "Allow health checks to WSFC nodes" direction = "INGRESS" action = "allow" @@ -135,39 +143,31 @@ module "firewall" { } } -# IP Address reservation for cluster and listener module "ip-addresses" { - source = "../../../modules/net-address" - project_id = local.vpc_project - + source = "../../../modules/net-address" + project_id = local.vpc_project internal_addresses = local.internal_addresses } -# L4 Internal Load Balancer for SQL Listener module "listener-ilb" { - source = "../../../modules/net-ilb" - for_each = toset(var.always_on_groups) - - project_id = var.project_id - region = var.region - - name = format("%s-%s-ilb", var.prefix, each.value) - service_label = format("%s-%s-ilb", var.prefix, each.value) - - address = module.ip-addresses.internal_addresses[format("%slb-%s", local.prefix, each.value)].address - network = local.network - subnetwork = local.subnetwork - + source = "../../../modules/net-ilb" + for_each = toset(var.always_on_groups) + project_id = var.project_id + region = var.region + name = "${var.prefix}-${each.value}-ilb" + service_label = "${var.prefix}-${each.value}-ilb" + address = local.internal_address_ips["${local.prefix}lb-${each.value}"] + vpc_config = { + network = local.network + subnetwork = local.subnetwork + } backends = [for k, node in module.nodes : { - failover = false - group = node.group.self_link - balancing_mode = "CONNECTION" + group = node.group.self_link }] - health_check_config = { - type = "tcp", - check = { port = var.health_check_port }, - config = var.health_check_config, - logging = true + enable_logging = true + tcp = { + port = var.health_check_port + } } } diff --git a/blueprints/factories/net-vpc-firewall-yaml/versions.tf b/blueprints/factories/net-vpc-firewall-yaml/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/factories/net-vpc-firewall-yaml/versions.tf +++ b/blueprints/factories/net-vpc-firewall-yaml/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/factories/project-factory/README.md b/blueprints/factories/project-factory/README.md index e496aa4d..cee829ff 100644 --- 
a/blueprints/factories/project-factory/README.md +++ b/blueprints/factories/project-factory/README.md @@ -68,13 +68,13 @@ module "projects" { iam = try(each.value.iam, {}) kms_service_agents = try(each.value.kms, {}) labels = try(each.value.labels, {}) - org_policies = try(each.value.org_policies, null) + org_policies = try(each.value.org_policies, {}) service_accounts = try(each.value.service_accounts, {}) services = try(each.value.services, []) service_identities_iam = try(each.value.service_identities_iam, {}) vpc = try(each.value.vpc, null) } -# tftest modules=7 resources=27 +# tftest modules=7 resources=29 ``` ### Projects configuration @@ -153,16 +153,16 @@ labels: environment: prod # [opt] Org policy overrides defined at project level -org_policies: - policy_boolean: - constraints/compute.disableGuestAttributesAccess: true - policy_list: - constraints/compute.trustedImageProjects: - inherit_from_parent: null - status: true - suggested_value: null +org_policies: + constraints/compute.disableGuestAttributesAccess: + enforce: true + constraints/compute.trustedImageProjects: + allow: values: - - projects/fast-prod-iac-core-0 + - projects/fast-dev-iac-core-0 + constraints/compute.vmExternalIpAccess: + deny: + all: true # [opt] Service account to create for the project and their roles on the project # in name => [roles] format @@ -221,23 +221,28 @@ vpc: | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [billing_account_id](variables.tf#L17) | Billing account id. | string | ✓ | | -| [project_id](variables.tf#L119) | Project id. | string | ✓ | | +| [project_id](variables.tf#L157) | Project id. | string | ✓ | | | [billing_alert](variables.tf#L22) | Billing alert configuration. | object({…}) | | null | | [defaults](variables.tf#L35) | Project factory default values. | object({…}) | | null | | [dns_zones](variables.tf#L57) | DNS private zones to create as child of var.defaults.environment_dns_zone. | list(string) | | [] | | [essential_contacts](variables.tf#L63) | Email contacts to be used for billing and GCP notifications. | list(string) | | [] | | [folder_id](variables.tf#L69) | Folder ID for the folder where the project will be created. | string | | null | | [group_iam](variables.tf#L75) | Custom IAM settings in group => [role] format. | map(list(string)) | | {} | -| [iam](variables.tf#L81) | Custom IAM settings in role => [principal] format. | map(list(string)) | | {} | -| [kms_service_agents](variables.tf#L87) | KMS IAM configuration in as service => [key]. | map(list(string)) | | {} | -| [labels](variables.tf#L93) | Labels to be assigned at project level. | map(string) | | {} | -| [org_policies](variables.tf#L99) | Org-policy overrides at project level. | object({…}) | | null | -| [prefix](variables.tf#L113) | Prefix used for the project id. | string | | null | -| [service_accounts](variables.tf#L124) | Service accounts to be created, and roles assigned them on the project. | map(list(string)) | | {} | -| [service_accounts_iam](variables.tf#L130) | IAM bindings on service account resources. Format is KEY => {ROLE => [MEMBERS]} | map(map(list(string))) | | {} | -| [service_identities_iam](variables.tf#L144) | Custom IAM settings for service identities in service => [role] format. | map(list(string)) | | {} | -| [services](variables.tf#L137) | Services to be enabled for the project. | list(string) | | [] | -| [vpc](variables.tf#L151) | VPC configuration for the project. 
| object({…}) | | null | +| [group_iam_additive](variables.tf#L81) | Custom additive IAM settings in group => [role] format. | map(list(string)) | | {} | +| [iam](variables.tf#L87) | Custom IAM settings in role => [principal] format. | map(list(string)) | | {} | +| [iam_additive](variables.tf#L93) | Custom additive IAM settings in role => [principal] format. | map(list(string)) | | {} | +| [kms_service_agents](variables.tf#L99) | KMS IAM configuration in as service => [key]. | map(list(string)) | | {} | +| [labels](variables.tf#L105) | Labels to be assigned at project level. | map(string) | | {} | +| [org_policies](variables.tf#L111) | Org-policy overrides at project level. | map(object({…})) | | {} | +| [prefix](variables.tf#L151) | Prefix used for the project id. | string | | null | +| [service_accounts](variables.tf#L162) | Service accounts to be created, and roles assigned them on the project. | map(list(string)) | | {} | +| [service_accounts_additive](variables.tf#L168) | Service accounts to be created, and roles assigned them on the project additively. | map(list(string)) | | {} | +| [service_accounts_iam](variables.tf#L174) | IAM bindings on service account resources. Format is KEY => {ROLE => [MEMBERS]} | map(map(list(string))) | | {} | +| [service_accounts_iam_additive](variables.tf#L181) | IAM additive bindings on service account resources. Format is KEY => {ROLE => [MEMBERS]} | map(map(list(string))) | | {} | +| [service_identities_iam](variables.tf#L195) | Custom IAM settings for service identities in service => [role] format. | map(list(string)) | | {} | +| [service_identities_iam_additive](variables.tf#L202) | Custom additive IAM settings for service identities in service => [role] format. | map(list(string)) | | {} | +| [services](variables.tf#L188) | Services to be enabled for the project. | list(string) | | [] | +| [vpc](variables.tf#L209) | VPC configuration for the project. | object({…}) | | null | ## Outputs diff --git a/blueprints/factories/project-factory/main.tf b/blueprints/factories/project-factory/main.tf index 996b79e3..1fe5e1e4 100644 --- a/blueprints/factories/project-factory/main.tf +++ b/blueprints/factories/project-factory/main.tf @@ -21,7 +21,14 @@ locals { "group:${k}" if try(index(v, r), null) != null ] } - _group_iam_bindings = distinct(flatten(values(var.group_iam))) + _group_iam_additive = { + for r in local._group_iam_additive_bindings : r => [ + for k, v in var.group_iam_additive : + "group:${k}" if try(index(v, r), null) != null + ] + } + _group_iam_bindings = distinct(flatten(values(var.group_iam))) + _group_iam_additive_bindings = distinct(flatten(values(var.group_iam_additive))) _project_id = ( var.prefix == null || var.prefix == "" ? var.project_id @@ -37,9 +44,20 @@ locals { _service_accounts_iam_bindings = distinct(flatten( values(var.service_accounts) )) + _service_accounts_iam_additive = { + for r in local._service_accounts_iam_additive_bindings : r => [ + for k, v in var.service_accounts_additive : + module.service-accounts[k].iam_email + if try(index(v, r), null) != null + ] + } + _service_accounts_iam_additive_bindings = distinct(flatten( + values(var.service_accounts_additive) + )) _services = concat([ "billingbudgets.googleapis.com", - "essentialcontacts.googleapis.com" + "essentialcontacts.googleapis.com", + "orgpolicy.googleapis.com", ], length(var.dns_zones) > 0 ? ["dns.googleapis.com"] : [], try(var.vpc.gke_setup, null) != null ? 
["container.googleapis.com"] : [], @@ -53,6 +71,14 @@ locals { if contains(roles, role) ] } + _service_identities_roles_additive = distinct(flatten(values(var.service_identities_iam_additive))) + _service_identities_iam_additive = { + for role in local._service_identities_roles_additive : role => [ + for service, roles in var.service_identities_iam_additive : + "serviceAccount:${module.project.service_accounts.robots[service]}" + if contains(roles, role) + ] + } _vpc_subnet_bindings = ( local.vpc.subnets_iam == null || local.vpc.host_project == null ? [] @@ -91,6 +117,20 @@ locals { try(local._service_identities_iam[role], []), ) } + iam_additive = { + for role in distinct(concat( + keys(var.iam_additive), + keys(local._group_iam_additive), + keys(local._service_accounts_iam_additive), + keys(local._service_identities_iam_additive), + )) : + role => concat( + try(var.iam_additive[role], []), + try(local._group_iam_additive[role], []), + try(local._service_accounts_iam_additive[role], []), + try(local._service_identities_iam_additive[role], []), + ) + } labels = merge( coalesce(var.labels, {}), coalesce(try(var.defaults.labels, {}), {}) ) @@ -147,10 +187,10 @@ module "project" { prefix = var.prefix contacts = { for c in local.essential_contacts : c => ["ALL"] } iam = local.iam + iam_additive = local.iam_additive labels = local.labels + org_policies = try(var.org_policies, {}) parent = var.folder_id - policy_boolean = try(var.org_policies.policy_boolean, {}) - policy_list = try(var.org_policies.policy_list, {}) service_encryption_key_ids = var.kms_service_agents services = local.services shared_vpc_service_config = var.vpc == null ? null : { diff --git a/blueprints/factories/project-factory/sample-data/projects/project.yaml b/blueprints/factories/project-factory/sample-data/projects/project.yaml index 13a8f5f5..88ba0bf5 100644 --- a/blueprints/factories/project-factory/sample-data/projects/project.yaml +++ b/blueprints/factories/project-factory/sample-data/projects/project.yaml @@ -48,15 +48,15 @@ labels: # [opt] Org policy overrides defined at project level org_policies: - policy_boolean: - constraints/compute.disableGuestAttributesAccess: true - policy_list: - constraints/compute.trustedImageProjects: - inherit_from_parent: null - status: true - suggested_value: null + constraints/compute.disableGuestAttributesAccess: + enforce: true + constraints/compute.trustedImageProjects: + allow: values: - projects/fast-dev-iac-core-0 + constraints/compute.vmExternalIpAccess: + deny: + all: true # [opt] Service account to create for the project and their roles on the project # in name => [roles] format diff --git a/blueprints/factories/project-factory/variables.tf b/blueprints/factories/project-factory/variables.tf index 6154c032..8efc0bc1 100644 --- a/blueprints/factories/project-factory/variables.tf +++ b/blueprints/factories/project-factory/variables.tf @@ -78,12 +78,24 @@ variable "group_iam" { default = {} } +variable "group_iam_additive" { + description = "Custom additive IAM settings in group => [role] format." + type = map(list(string)) + default = {} +} + variable "iam" { description = "Custom IAM settings in role => [principal] format." type = map(list(string)) default = {} } +variable "iam_additive" { + description = "Custom additive IAM settings in role => [principal] format." + type = map(list(string)) + default = {} +} + variable "kms_service_agents" { description = "KMS IAM configuration in as service => [key]." 
type = map(list(string)) @@ -98,16 +110,42 @@ variable "labels" { variable "org_policies" { description = "Org-policy overrides at project level." - type = object({ - policy_boolean = map(bool) - policy_list = map(object({ - inherit_from_parent = bool - suggested_value = string - status = bool - values = list(string) + type = map(object({ + inherit_from_parent = optional(bool) # for list policies only. + reset = optional(bool) + + # default (unconditional) values + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) })) - }) - default = null + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + + # conditional values + rules = optional(list(object({ + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + condition = object({ + description = optional(string) + expression = optional(string) + location = optional(string) + title = optional(string) + }) + })), []) + })) + default = {} + nullable = false } variable "prefix" { @@ -127,6 +165,12 @@ variable "service_accounts" { default = {} } +variable "service_accounts_additive" { + description = "Service accounts to be created, and roles assigned them on the project additively." + type = map(list(string)) + default = {} +} + variable "service_accounts_iam" { description = "IAM bindings on service account resources. Format is KEY => {ROLE => [MEMBERS]}" type = map(map(list(string))) @@ -134,6 +178,13 @@ variable "service_accounts_iam" { nullable = false } +variable "service_accounts_iam_additive" { + description = "IAM additive bindings on service account resources. Format is KEY => {ROLE => [MEMBERS]}" + type = map(map(list(string))) + default = {} + nullable = false +} + variable "services" { description = "Services to be enabled for the project." type = list(string) @@ -148,6 +199,13 @@ variable "service_identities_iam" { nullable = false } +variable "service_identities_iam_additive" { + description = "Custom additive IAM settings for service identities in service => [role] format." + type = map(list(string)) + default = {} + nullable = false +} + variable "vpc" { description = "VPC configuration for the project." type = object({ @@ -160,6 +218,3 @@ variable "vpc" { }) default = null } - - - diff --git a/blueprints/gke/README.md b/blueprints/gke/README.md index a2c48071..30418ca4 100644 --- a/blueprints/gke/README.md +++ b/blueprints/gke/README.md @@ -6,6 +6,18 @@ They are meant to be used as minimal but complete starting points to create actu ## Blueprints +### Binary Authorization Pipeline + + This [blueprint](../gke/binauthz/) shows how to create a CI and a CD pipeline in Cloud Build for the deployment of an application to a private GKE cluster with unrestricted access to a public endpoint. The blueprint enables a Binary Authorization policy in the project so only images that have been attested can be deployed to the cluster. The attestations are created using a cryptographic key pair that has been provisioned in KMS. + +
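Both GKE blueprints in this diff switch their nodepools from an empty `service_account = {}` to a module-managed service account. A minimal sketch of the resulting `gke-nodepool` call, mirroring the binauthz nodepool block further down; the project, cluster and zone references are assumed from that blueprint:

```
module "cluster_nodepool" {
  source       = "../../../modules/gke-nodepool"
  project_id   = module.project.project_id # assumes the blueprint's project module
  cluster_name = module.cluster.name       # assumes the blueprint's gke-cluster module
  location     = var.zone
  name         = "nodepool"
  service_account = {
    create = true # have the module create a dedicated service account
  }
  node_count = { initial = 3 }
}
```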
+ +### Multi-cluster mesh on GKE (fleet API) + + This [blueprint](../gke/multi-cluster-mesh-gke-fleet-api/) shows how to create a multi-cluster mesh for two private clusters on GKE. Anthos Service Mesh with automatic control plane management is set up for clusters using the Fleet API. This can only be done if the clusters are in a single project and in the same VPC. In this particular case both clusters have been deployed to different subnets in a shared VPC. + +
+ ### Multitenant GKE fleet This [blueprint](./multitenant-fleet/) allows simple centralized management of similar sets of GKE clusters and their nodepools in a single project, and optional fleet management via GKE Hub templated configurations. @@ -16,14 +28,5 @@ They are meant to be used as minimal but complete starting points to create actu This [blueprint](../networking/shared-vpc-gke/) shows how to configure a Shared VPC, including the specific IAM configurations needed for GKE, and to give different level of access to the VPC subnets to different identities. It is meant to be used as a starting point for most Shared VPC configurations, and to be integrated to the above blueprints where Shared VPC is needed in more complex network topologies. -
- -### Binary Authorization Pipeline - - This [blueprint](../gke/binauthz/) shows how to create a CI and a CD pipeline in Cloud Build for the deployment of an application to a private GKE cluster with unrestricted access to a public endpoint. The blueprint enables a Binary Authorization policy in the project so only images that have been attested can be deployed to the cluster. The attestations are created using a cryptographic key pair that has been provisioned in KMS. -
- -### Multi-cluster mesh on GKE (fleet API) - - This [blueprint](../gke/multi-cluster-mesh-gke-fleet-api/) shows how to create a multi-cluster mesh for two private clusters on GKE. Anthos Service Mesh with automatic control plane management is set up for clusters using the Fleet API. This can only be done if the clusters are in a single project and in the same VPC. In this particular case both clusters having being deployed to different subnets in a shared VPC. +
diff --git a/blueprints/gke/binauthz/main.tf b/blueprints/gke/binauthz/main.tf index 79323943..0c3655e4 100644 --- a/blueprints/gke/binauthz/main.tf +++ b/blueprints/gke/binauthz/main.tf @@ -99,13 +99,15 @@ module "cluster" { } module "cluster_nodepool" { - source = "../../../modules/gke-nodepool" - project_id = module.project.project_id - cluster_name = module.cluster.name - location = var.zone - name = "nodepool" - service_account = {} - node_count = { initial = 3 } + source = "../../../modules/gke-nodepool" + project_id = module.project.project_id + cluster_name = module.cluster.name + location = var.zone + name = "nodepool" + service_account = { + create = true + } + node_count = { initial = 3 } } module "kms" { diff --git a/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf b/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf index 73ab19b1..6c769d92 100644 --- a/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf +++ b/blueprints/gke/multi-cluster-mesh-gke-fleet-api/gke.tf @@ -44,15 +44,17 @@ module "clusters" { } module "cluster_nodepools" { - for_each = var.clusters_config - source = "../../../modules/gke-nodepool" - project_id = module.fleet_project.project_id - cluster_name = module.clusters[each.key].name - location = var.region - name = "nodepool-${each.key}" - node_count = { initial = 1 } - service_account = {} - tags = ["${each.key}-node"] + for_each = var.clusters_config + source = "../../../modules/gke-nodepool" + project_id = module.fleet_project.project_id + cluster_name = module.clusters[each.key].name + location = var.region + name = "nodepool-${each.key}" + node_count = { initial = 1 } + service_account = { + create = true + } + tags = ["${each.key}-node"] } module "hub" { diff --git a/blueprints/gke/multitenant-fleet/README.md b/blueprints/gke/multitenant-fleet/README.md index ab8c6247..bd6df945 100644 --- a/blueprints/gke/multitenant-fleet/README.md +++ b/blueprints/gke/multitenant-fleet/README.md @@ -246,20 +246,20 @@ module "gke" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [billing_account_id](variables.tf#L17) | Billing account id. | string | ✓ | | -| [folder_id](variables.tf#L129) | Folder used for the GKE project in folders/nnnnnnnnnnn format. | string | ✓ | | -| [prefix](variables.tf#L176) | Prefix used for resources that need unique names. | string | ✓ | | -| [project_id](variables.tf#L181) | ID of the project that will contain all the clusters. | string | ✓ | | -| [vpc_config](variables.tf#L193) | Shared VPC project and VPC details. | object({…}) | ✓ | | -| [clusters](variables.tf#L22) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | -| [fleet_configmanagement_clusters](variables.tf#L67) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | -| [fleet_configmanagement_templates](variables.tf#L74) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | -| [fleet_features](variables.tf#L109) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | -| [fleet_workload_identity](variables.tf#L122) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | -| [group_iam](variables.tf#L134) | Project-level IAM bindings for groups. 
Use group emails as keys, list of roles as values. | map(list(string)) | | {} | -| [iam](variables.tf#L141) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | -| [labels](variables.tf#L148) | Project-level labels. | map(string) | | {} | -| [nodepools](variables.tf#L154) | Nodepools configuration. Refer to the gke-nodepool module for type details. | map(map(object({…}))) | | {} | -| [project_services](variables.tf#L186) | Additional project services to enable. | list(string) | | [] | +| [folder_id](variables.tf#L132) | Folder used for the GKE project in folders/nnnnnnnnnnn format. | string | ✓ | | +| [prefix](variables.tf#L179) | Prefix used for resources that need unique names. | string | ✓ | | +| [project_id](variables.tf#L184) | ID of the project that will contain all the clusters. | string | ✓ | | +| [vpc_config](variables.tf#L196) | Shared VPC project and VPC details. | object({…}) | ✓ | | +| [clusters](variables.tf#L22) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | +| [fleet_configmanagement_clusters](variables.tf#L70) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | +| [fleet_configmanagement_templates](variables.tf#L77) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | +| [fleet_features](variables.tf#L112) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | +| [fleet_workload_identity](variables.tf#L125) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | +| [group_iam](variables.tf#L137) | Project-level IAM bindings for groups. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | +| [iam](variables.tf#L144) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | +| [labels](variables.tf#L151) | Project-level labels. | map(string) | | {} | +| [nodepools](variables.tf#L157) | Nodepools configuration. Refer to the gke-nodepool module for type details. | map(map(object({…}))) | | {} | +| [project_services](variables.tf#L189) | Additional project services to enable. 
| list(string) | | [] | ## Outputs diff --git a/blueprints/gke/multitenant-fleet/variables.tf b/blueprints/gke/multitenant-fleet/variables.tf index d0464298..8d6c69ae 100644 --- a/blueprints/gke/multitenant-fleet/variables.tf +++ b/blueprints/gke/multitenant-fleet/variables.tf @@ -39,9 +39,12 @@ variable "clusters" { recurring_window = null maintenance_exclusion = [] }) - max_pods_per_node = optional(number, 110) - min_master_version = optional(string) - monitoring_config = optional(list(string), ["SYSTEM_COMPONENTS"]) + max_pods_per_node = optional(number, 110) + min_master_version = optional(string) + monitoring_config = optional(object({ + enable_components = optional(list(string), ["SYSTEM_COMPONENTS"]) + managed_prometheus = optional(bool) + })) node_locations = optional(list(string)) private_cluster_config = optional(any) release_channel = optional(string) diff --git a/blueprints/networking/README.md b/blueprints/networking/README.md index e234cc25..c4a3d2f0 100644 --- a/blueprints/networking/README.md +++ b/blueprints/networking/README.md @@ -6,11 +6,30 @@ They are meant to be used as minimal but complete starting points to create actu ## Blueprints +### Decentralized firewall management + + This [blueprint](./decentralized-firewall/) shows how a decentralized firewall management can be organized using the [firewall factory](../factories/net-vpc-firewall-yaml/). + +
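The decentralized-firewall hunk further down also drops the explicit `ttl` from the wildcard `googleapis.com` recordsets, falling back to the DNS module default. A minimal sketch of one of those private zones after the change; `source`, `type` and `name` are not visible in the hunk and are assumed here:

```
module "dns-api-prod" {
  source          = "../../../modules/dns"
  project_id      = module.project.project_id # assumes the blueprint's project module
  type            = "private"                 # assumed zone type
  name            = "googleapis"              # assumed zone name
  domain          = "googleapis.com."
  client_networks = [module.vpc-prod.self_link]
  recordsets = {
    # ttl omitted, the module default applies
    "CNAME *" = { records = ["private.googleapis.com."] }
  }
}
```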
+ +### Network filtering with Squid + + This [blueprint](./filtering-proxy/) shows how to deploy a filtering HTTP proxy to restrict Internet access, in a simplified setup using a VPC with two subnets and a Cloud DNS zone, and an optional MIG for scaling. + +
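The filtering-proxy hunk later in this diff rewrites the optional Squid MIG for the refactored `compute-mig` interface: `instance_template` moves to the top level, autoscaling signals sit under `scaling_signals`, and the health check is expressed directly as a TCP check. A condensed sketch of that block, with the `count` guard that makes the MIG optional omitted and the template and project references assumed from the blueprint:

```
module "squid-mig" {
  source            = "../../../modules/compute-mig"
  project_id        = module.project-host.project_id     # assumes the blueprint's host project
  location          = "${var.region}-b"
  name              = "squid-mig"
  instance_template = module.squid-vm.template.self_link # assumes the blueprint's squid-vm module
  target_size       = 1
  auto_healing_policies = {
    initial_delay_sec = 60
  }
  autoscaler_config = {
    max_replicas    = 10
    min_replicas    = 1
    cooldown_period = 30
    scaling_signals = {
      cpu_utilization = {
        target = 0.65
      }
    }
  }
  health_check_config = {
    enable_logging = true
    tcp = {
      port = 3128
    }
  }
}
```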
+ +### HTTP Load Balancer with Cloud Armor + + This [blueprint](./glb-and-armor/) contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. + +
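The glb-and-armor blueprint added later in this diff deploys into an existing project by default; a minimal `terraform.tfvars` sketch based on the variables listed in its new README, with placeholder values:

```
# terraform.tfvars for the glb-and-armor blueprint (placeholder values)
project_id              = "my-project-id"
prefix                  = "glb"  # optional, defaults to null
enforce_security_policy = true   # toggles the Cloud Armor policy used in the deny-listing test
```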
+ ### Hub and Spoke via Peering This [blueprint](./hub-and-spoke-peering/) implements a hub and spoke topology via VPC peering, a common design where a landing zone VPC (hub) is connected to on-premises, and then peered with satellite VPCs (spokes) to further partition the infrastructure. The sample highlights the lack of transitivity in peering: the absence of connectivity between spokes, and the need create workarounds for private service access to managed services. One such workaround is shown for private GKE, allowing access from hub and all spokes to GKE masters via a dedicated VPN. +
### Hub and Spoke via Dynamic VPN @@ -18,6 +37,19 @@ The sample highlights the lack of transitivity in peering: the absence of connec This [blueprint](./hub-and-spoke-vpn/) implements a hub and spoke topology via dynamic VPN tunnels, a common design where peering cannot be used due to limitations on the number of spokes or connectivity to managed services. The blueprint shows how to implement spoke transitivity via BGP advertisements, how to expose hub DNS zones to spokes via DNS peering, and allows easy testing of different VPN and BGP configurations. + +
+ +### ILB as next hop + + This [blueprint](./ilb-next-hop/) allows testing [ILB as next hop](https://cloud.google.com/load-balancing/docs/internal/ilb-next-hop-overview) using simple Linux gateway VMS between two VPCs, to emulate virtual appliances. An optional additional ILB can be enabled to test multiple load balancer configurations and hashing. + +
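This PR also reworks the `net-ilb` interface used by several blueprints in this diff: `network` and `subnetwork` move under `vpc_config`, backends only need a `group`, and TCP health checks are declared directly. A minimal sketch modeled on the squid-ilb block further down, with the VPC and backend group references assumed from that blueprint:

```
module "squid-ilb" {
  source        = "../../../modules/net-ilb"
  project_id    = module.project-host.project_id # assumes the blueprint's host project
  region        = var.region
  name          = "squid-ilb"
  ports         = [3128]
  service_label = "squid-ilb"
  vpc_config = {
    network    = module.vpc.self_link
    subnetwork = module.vpc.subnet_self_links["${var.region}/proxy"]
  }
  backends = [{
    group = module.squid-mig.0.group_manager.instance_group # assumes the optional MIG is enabled
  }]
  health_check_config = {
    enable_logging = true
    tcp = {
      port = 3128
    }
  }
}
```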
+ +### Nginx-based reverse proxy cluster + + This [blueprint](./nginx-reverse-proxy-cluster/) shows how to deploy an autoscaling reverse proxy cluster using Nginx, based on regional Managed Instance Groups. The autoscaling is driven by the Nginx current connections metric, sent by Cloud Ops Agent. +
### DNS and Private Access for On-premises @@ -25,6 +57,19 @@ The blueprint shows how to implement spoke transitivity via BGP advertisements, This [blueprint](./onprem-google-access-dns/) uses an emulated on-premises environment running in Docker containers inside a GCE instance, to allow testing specific features like DNS policies, DNS forwarding zones across VPN, and Private Access for On-premises hosts. The emulated on-premises environment can be used to test access to different services from outside Google Cloud, by implementing a VPN connection and BGP to Google CLoud via Strongswan and Bird. + +
+ +### Calling a private Cloud Function from on-premises + + This [blueprint](./private-cloud-function-from-onprem/) shows how to invoke a [private Google Cloud Function](https://cloud.google.com/functions/docs/networking/network-settings) from the on-prem environment via a [Private Service Connect endpoint](https://cloud.google.com/vpc/docs/private-service-connect#benefits-apis). + +
+ +### Calling on-premise services through PSC and hybrid NEGs + + This [blueprint](./psc-hybrid/) shows how to privately connect to on-premise services (IP + port) from GCP, leveraging [Private Service Connect (PSC)](https://cloud.google.com/vpc/docs/private-service-connect) and [Hybrid Network Endpoint Groups](https://cloud.google.com/load-balancing/docs/negs/hybrid-neg-concepts). +
### Shared VPC with GKE and per-subnet support @@ -32,24 +77,5 @@ The emulated on-premises environment can be used to test access to different ser This [blueprint](./shared-vpc-gke/) shows how to configure a Shared VPC, including the specific IAM configurations needed for GKE, and to give different level of access to the VPC subnets to different identities. It is meant to be used as a starting point for most Shared VPC configurations, and to be integrated to the above blueprints where Shared VPC is needed in more complex network topologies. -
- -### ILB as next hop - - This [blueprint](./ilb-next-hop/) allows testing [ILB as next hop](https://cloud.google.com/load-balancing/docs/internal/ilb-next-hop-overview) using simple Linux gateway VMS between two VPCs, to emulate virtual appliances. An optional additional ILB can be enabled to test multiple load balancer configurations and hashing. -
- -### Calling a private Cloud Function from on-premises - - This [blueprint](./private-cloud-function-from-onprem/) shows how to invoke a [private Google Cloud Function](https://cloud.google.com/functions/docs/networking/network-settings) from the on-prem environment via a [Private Service Connect endpoint](https://cloud.google.com/vpc/docs/private-service-connect#benefits-apis). -
- -### Calling on-premise services through PSC and hybrid NEGs - - This [blueprint](./psc-hybrid/) shows how to privately connect to on-premise services (IP + port) from GCP, leveraging [Private Service Connect (PSC)](https://cloud.google.com/vpc/docs/private-service-connect) and [Hybrid Network Endpoint Groups](https://cloud.google.com/load-balancing/docs/negs/hybrid-neg-concepts). -
- -### Decentralized firewall management - - This [blueprint](./decentralized-firewall/) shows how a decentralized firewall management can be organized using the [firewall factory](../factories/net-vpc-firewall-yaml/). +
diff --git a/blueprints/networking/decentralized-firewall/main.tf b/blueprints/networking/decentralized-firewall/main.tf index ab42b649..a05a104f 100644 --- a/blueprints/networking/decentralized-firewall/main.tf +++ b/blueprints/networking/decentralized-firewall/main.tf @@ -84,7 +84,7 @@ module "dns-api-prod" { domain = "googleapis.com." client_networks = [module.vpc-prod.self_link] recordsets = { - "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } } } @@ -96,7 +96,7 @@ module "dns-api-dev" { domain = "googleapis.com." client_networks = [module.vpc-dev.self_link] recordsets = { - "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } } } diff --git a/blueprints/networking/decentralized-firewall/versions.tf b/blueprints/networking/decentralized-firewall/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/networking/decentralized-firewall/versions.tf +++ b/blueprints/networking/decentralized-firewall/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/networking/filtering-proxy/main.tf b/blueprints/networking/filtering-proxy/main.tf index 884fbd30..eef3bf2d 100644 --- a/blueprints/networking/filtering-proxy/main.tf +++ b/blueprints/networking/filtering-proxy/main.tf @@ -165,33 +165,31 @@ module "squid-vm" { } module "squid-mig" { - count = var.mig ? 1 : 0 - source = "../../../modules/compute-mig" - project_id = module.project-host.project_id - location = "${var.region}-b" - name = "squid-mig" - target_size = 1 - autoscaler_config = { - max_replicas = 10 - min_replicas = 1 - cooldown_period = 30 - cpu_utilization_target = 0.65 - load_balancing_utilization_target = null - metric = null + count = var.mig ? 
1 : 0 + source = "../../../modules/compute-mig" + project_id = module.project-host.project_id + location = "${var.region}-b" + name = "squid-mig" + instance_template = module.squid-vm.template.self_link + target_size = 1 + auto_healing_policies = { + initial_delay_sec = 60 } - default_version = { - instance_template = module.squid-vm.template.self_link - name = "default" + autoscaler_config = { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 30 + scaling_signals = { + cpu_utilization = { + target = 0.65 + } + } } health_check_config = { - type = "tcp" - check = { port = 3128 } - config = {} - logging = true - } - auto_healing_policies = { - health_check = module.squid-mig.0.health_check.self_link - initial_delay_sec = 60 + enable_logging = true + tcp = { + port = 3128 + } } } @@ -201,20 +199,20 @@ module "squid-ilb" { project_id = module.project-host.project_id region = var.region name = "squid-ilb" - service_label = "squid-ilb" - network = module.vpc.self_link - subnetwork = module.vpc.subnet_self_links["${var.region}/proxy"] ports = [3128] + service_label = "squid-ilb" + vpc_config = { + network = module.vpc.self_link + subnetwork = module.vpc.subnet_self_links["${var.region}/proxy"] + } backends = [{ - failover = false - group = module.squid-mig.0.group_manager.instance_group - balancing_mode = "CONNECTION" + group = module.squid-mig.0.group_manager.instance_group }] health_check_config = { - type = "tcp" - check = { port = 3128 } - config = {} - logging = true + enable_logging = true + tcp = { + port = 3128 + } } } @@ -226,13 +224,10 @@ module "folder-apps" { source = "../../../modules/folder" parent = var.root_node name = "apps" - policy_list = { + org_policies = { # prevent VMs with public IPs in the apps folder "constraints/compute.vmExternalIpAccess" = { - inherit_from_parent = false - suggested_value = null - status = false - values = [] + deny = { all = true } } } } diff --git a/blueprints/networking/filtering-proxy/versions.tf b/blueprints/networking/filtering-proxy/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/networking/filtering-proxy/versions.tf +++ b/blueprints/networking/filtering-proxy/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/networking/glb-and-armor/README.md b/blueprints/networking/glb-and-armor/README.md new file mode 100644 index 00000000..0c9a802e --- /dev/null +++ b/blueprints/networking/glb-and-armor/README.md @@ -0,0 +1,140 @@ +# HTTP Load Balancer with Cloud Armor + +## Introduction + +This blueprint contains all necessary Terraform modules to build a multi-regional infrastructure with horizontally scalable managed instance group backends, HTTP load balancing and Google’s advanced WAF security tool (Cloud Armor) on top to securely deploy an application at global scale. + +This tutorial is general enough to fit in a variety of use-cases, from hosting a mobile app's backend to deploy proprietary workloads at scale. + +## Use cases + +Even though there are many ways to implement an architecture, some workloads require high compute power or specific licenses while making sure the services are secured by a managed service and highly available across multiple regions. 
An architecture consisting of Managed Instance Groups in multiple regions available through an HTTP Load Balancer with Cloud Armor enabled is suitable for such use-cases. + +This architecture caters to multiple workloads ranging from the ones requiring compliance with specific data access restrictions to compute-specific proprietary applications with specific licensing and OS requirements. Descriptions of some possible use-cases are as follows: + +* __Proprietary OS workloads__: Some applications require specific Operating systems (enterprise grade Linux distributions for example) with specific licensing requirements or low-level access to the kernel. In such cases, since the applications cannot be containerised and horizontal scaling is required, multi-region Managed Instance Group (MIG) with custom instance images are the ideal implementation. +* __Industry-specific applications__: Other applications may require high compute power alongside a sophisticated layer of networking security. This architecture satisfies both these requirements by promising configurable compute power on the instances backed by various features offered by Cloud Armor such as traffic restriction, DDoS protection etc. +* __Workloads requiring GDPR compliance__: Most applications require restricting data access and usage from outside a certain region (mostly to comply with data residency requirements). This architecture caters to such workloads as Cloud Armor allows you to lock access to your workloads from various fine-grained identifiers. +* __Medical Queuing systems__: Another great example usage for this architecture will be applications requiring high compute power, availability and limited memory access requirements such as a medical queuing system. +* __DDoS Protection and WAF__: Applications and workloads exposed to the internet expose themselves to the risk of DDoS attacks. While L3/L4 and protocol based attacks are handled at Google’s edge, L7 attacks can still be effective with botnets. A setup of an external Cloud Load Balancer with Cloud Armor and appropriate WAF rules can mitigate such attacks. +* __Geofencing__: If you want to restrict content served on your application due to licensing restrictions (similar to OTT content in the US), Geofencing allows you to create a virtual perimeter to stop the service from being accessed outside the region. The architecture of using a Cloud Load Balancer with Cloud Armor enables you to implement geofencing around your applications and services. + +## Architecture + +

+ +The main components that we would be setting up are (to learn more about these products, click on the hyperlinks): + +* [Cloud Armor](https://cloud.google.com/armor) - Google Cloud Armor is the web-application firewall (WAF) and DDoS mitigation service that helps users defend their web apps and services at Google scale at the edge of Google’s network. +* [Cloud Load Balancer](https://cloud.google.com/load-balancing) - When your app usage spikes, it is important to scale, optimize and secure the app. Cloud Load Balancing is a fully distributed solution that balances user traffic to multiple backends to avoid congestion, reduce latency and increase security. Some important features it offers that we use here are: + * Single global anycast IP and autoscaling - CLB acts as a frontend to all your backend instances across all regions. It provides cross-region load balancing, automatic multi-region failover and scales to support increase in resources. + * Global Forwarding Rule - To route traffic to different regions, global load balancers use global forwarding rules, which bind the global IP address and a single target proxy. + * Target Proxy - For external HTTP(S) load balancers, proxies route incoming requests to a URL map. This is essentially how you can handle the connections. + * URL Map - URL Maps are used to route requests to a backend service based on the rules that you define for the host and path of an incoming URL. + * Backend Service - A Backend Service defines CLB distributes traffic. The backend service configuration consists of a set of values - protocols to connect to backends, session settings, health checks and timeouts. + * Health Check - Health check is a method provided to determine if the corresponding backends respond to traffic. Health checks connect to backends on a configurable, periodic basis. Each connection attempt is called a probe. Google Cloud records the success or failure of each probe. +* [Firewall Rules](https://cloud.google.com/vpc/docs/firewalls) - Firewall rules let you allow or deny connections to or from your VM instances based on a configuration you specify. +* [Managed Instance Groups (MIG)](https://cloud.google.com/compute/docs/instance-groups) - Instance group is a collection of VM instances that you can manage as a single entity. MIGs allow you to operate apps and workloads on multiple identical VMs. You can also leverage the various features like autoscaling, autohealing, regional / multi-zone deployments. + +## Costs + +Pricing Estimates - We have created a sample estimate based on some usage we see from new startups looking to scale. This estimate would give you an idea of how much this deployment would essentially cost per month at this scale and you extend it to the scale you further prefer. Here's the [link](https://cloud.google.com/products/calculator/#id=3105bbf2-4ee0-4289-978e-9ab6855d37ed). + +## Setup + +This solution assumes you already have a project created and set up where you wish to host these resources. If not, and you would like for the project to create a new project as well, please refer to the [github repository](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/blueprints/data-solutions/gcs-to-bq-with-least-privileges) for instructions. + +### Prerequisites + +* Have an [organization](https://cloud.google.com/resource-manager/docs/creating-managing-organization) set up in Google cloud. +* Have a [billing account](https://cloud.google.com/billing/docs/how-to/manage-billing-account) set up. 
+* Have an existing [project](https://cloud.google.com/resource-manager/docs/creating-managing-projects) with [billing enabled](https://cloud.google.com/billing/docs/how-to/modify-project). + +### Roles & Permissions + +In order to spin up this architecture, you will need to be a user with the “__Project owner__” [IAM](https://cloud.google.com/iam) role on the existing project. + +Note: To grant a user a role, take a look at the [Granting and Revoking Access](https://cloud.google.com/iam/docs/granting-changing-revoking-access#grant-single-role) documentation. + +### Spinning up the architecture + +#### Step 1: Cloning the repository + +Click on the button below, sign in if required, and click on “confirm” when the prompt appears. + +[![Open Cloudshell](../../../assets/images/cloud-shell-button.png)](https://goo.gle/GoCloudArmor) + +This will clone the repository to your Cloud Shell and a screen like this one will appear: + +![cloud_shell](cloud_shell.png) + +Before we deploy the architecture, you will need the following information: + +* The __project ID__. + +#### Step 2: Deploying the resources + +1. After cloning the repo and going through the prerequisites, head back to the Cloud Shell editor. +2. Make sure you’re in the following directory; if not, change to it via the ‘cd’ command: + + cloudshell_open/cloud-foundation-fabric/blueprints/cloud-operations/glb_and_armor + +3. Run the following command to initialize the Terraform working directory: + + terraform init + +4. Copy the following command into a console, replace __[my-project-id]__ with your project’s ID, and run it to apply the Terraform configuration and create all relevant resources for this architecture: + + terraform apply -var project_id=[my-project-id] + +The resource creation will take a few minutes; when it’s complete, you should see an output stating the command completed successfully, together with a list of the created resources. + +__Congratulations__! You have successfully deployed an HTTP Load Balancer with two Managed Instance Group backends and Cloud Armor security. + +## Testing your architecture + +1. Connect to the siege VM using SSH (from Cloud Console or CLI) and run the following command: + + siege -c 250 -t150s http://$LB_IP + +2. In the Cloud Console, on the Navigation menu, click __Network Services > Load balancing__. +3. Click __Backends__, then click __http-backend__ and navigate to __http-lb__. +4. Click on the __Monitoring__ tab. +5. Monitor the Frontend Location (Total inbound traffic) between North America and the two backends for 2 to 3 minutes. At first, traffic should only be directed to __us-east1-mig__, but as the RPS increases, traffic is also directed to __europe-west1-mig__. This demonstrates that by default traffic is forwarded to the closest backend, but if the load is very high, it can be distributed across the backends. +6. Now, to test the IP deny-listing, rerun Terraform as follows: + + terraform apply -var project_id=my-project-id -var enforce_security_policy=true + +This applies a security policy that denylists the IP address of the siege VM. + +7. To test this, from the siege VM run the following command and verify that you get a __403 Forbidden__ error code back.
+ + curl http://$LB_IP + +## Cleaning up your environment + +The easiest way to remove all the deployed resources is to run the following command in Cloud Shell: + + terraform destroy + +The above command will delete the associated resources so there will be no billable charges made afterwards. + + + +## Variables + +| name | description | type | required | default | +|---|---|:---:|:---:|:---:| +| [project_id](variables.tf#L26) | Identifier of the project. | string | ✓ | | +| [enforce_security_policy](variables.tf#L31) | Enforce security policy. | bool | | true | +| [prefix](variables.tf#L37) | Prefix used for created resources. | string | | null | +| [project_create](variables.tf#L17) | Parameters for the creation of the new project. | object({…}) | | null | + +## Outputs + +| name | description | sensitive | +|---|---|:---:| +| [glb_ip_address](outputs.tf#L18) | Load balancer IP address. | | +| [vm_siege_external_ip](outputs.tf#L23) | Siege VM external IP address. | | + + diff --git a/blueprints/networking/glb-and-armor/architecture.png b/blueprints/networking/glb-and-armor/architecture.png new file mode 100644 index 00000000..64b1e186 Binary files /dev/null and b/blueprints/networking/glb-and-armor/architecture.png differ diff --git a/blueprints/data-solutions/gcs-to-bq-with-least-privileges/images/cloud_shell.png b/blueprints/networking/glb-and-armor/cloud_shell.png similarity index 100% rename from blueprints/data-solutions/gcs-to-bq-with-least-privileges/images/cloud_shell.png rename to blueprints/networking/glb-and-armor/cloud_shell.png diff --git a/blueprints/networking/glb-and-armor/main.tf b/blueprints/networking/glb-and-armor/main.tf new file mode 100644 index 00000000..83622609 --- /dev/null +++ b/blueprints/networking/glb-and-armor/main.tf @@ -0,0 +1,286 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + prefix = (var.prefix == null || var.prefix == "") ? "" : "${var.prefix}-" +} + +module "project" { + source = "../../../modules/project" + billing_account = (var.project_create != null + ? var.project_create.billing_account_id + : null + ) + parent = (var.project_create != null + ? var.project_create.parent + : null + ) + prefix = var.project_create == null ? 
null : var.prefix + name = var.project_id + services = [ + "compute.googleapis.com" + ] + project_create = var.project_create != null +} + + +module "vpc" { + source = "../../../modules/net-vpc" + project_id = module.project.project_id + name = "${local.prefix}vpc" + subnets = [ + { + ip_cidr_range = "10.0.1.0/24" + name = "subnet-ew1" + region = "europe-west1" + }, + { + ip_cidr_range = "10.0.2.0/24" + name = "subnet-ue1" + region = "us-east1" + }, + { + ip_cidr_range = "10.0.3.0/24" + name = "subnet-uw1" + region = "us-west1" + } + ] +} + +module "firewall" { + source = "../../../modules/net-vpc-firewall" + project_id = module.project.project_id + network = module.vpc.name +} + +module "nat_ew1" { + source = "../../../modules/net-cloudnat" + project_id = module.project.project_id + region = "europe-west1" + name = "${local.prefix}nat-eu1" + router_network = module.vpc.name +} + +module "nat_ue1" { + source = "../../../modules/net-cloudnat" + project_id = module.project.project_id + region = "us-east1" + name = "${local.prefix}nat-ue1" + router_network = module.vpc.name +} + +module "instance_template_ew1" { + source = "../../../modules/compute-vm" + project_id = module.project.project_id + zone = "europe-west1-b" + name = "${local.prefix}europe-west1-template" + instance_type = "n1-standard-2" + network_interfaces = [{ + network = module.vpc.self_link + subnetwork = module.vpc.subnet_self_links["europe-west1/subnet-ew1"] + }] + boot_disk = { + image = "projects/debian-cloud/global/images/family/debian-11" + } + metadata = { + startup-script-url = "gs://cloud-training/gcpnet/httplb/startup.sh" + } + create_template = true + tags = [ + "http-server" + ] +} + +module "instance_template_ue1" { + source = "../../../modules/compute-vm" + project_id = module.project.project_id + zone = "us-east1-b" + name = "${local.prefix}us-east1-template" + network_interfaces = [{ + network = module.vpc.self_link + subnetwork = module.vpc.subnet_self_links["us-east1/subnet-ue1"] + }] + boot_disk = { + image = "projects/debian-cloud/global/images/family/debian-11" + } + metadata = { + startup-script-url = "gs://cloud-training/gcpnet/httplb/startup.sh" + } + create_template = true + tags = [ + "http-server" + ] +} + +module "vm_siege" { + source = "../../../modules/compute-vm" + project_id = module.project.project_id + zone = "us-west1-c" + name = "siege-vm" + instance_type = "n1-standard-2" + network_interfaces = [{ + network = module.vpc.self_link + subnetwork = module.vpc.subnet_self_links["us-west1/subnet-uw1"] + nat = true + }] + boot_disk = { + image = "projects/debian-cloud/global/images/family/debian-11" + } + metadata = { + startup-script = < ## Variables diff --git a/blueprints/networking/nginx-reverse-proxy-cluster/versions.tf b/blueprints/networking/nginx-reverse-proxy-cluster/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/networking/nginx-reverse-proxy-cluster/versions.tf +++ b/blueprints/networking/nginx-reverse-proxy-cluster/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/networking/onprem-google-access-dns/main.tf b/blueprints/networking/onprem-google-access-dns/main.tf index 18159080..4a78f2ca 100644 --- a/blueprints/networking/onprem-google-access-dns/main.tf +++ 
b/blueprints/networking/onprem-google-access-dns/main.tf @@ -169,9 +169,9 @@ module "dns-gcp" { domain = "gcp.example.org." client_networks = [module.vpc.self_link] recordsets = { - "A localhost" = { ttl = 300, records = ["127.0.0.1"] } - "A test-1" = { ttl = 300, records = [module.vm-test1.internal_ip] } - "A test-2" = { ttl = 300, records = [module.vm-test2.internal_ip] } + "A localhost" = { records = ["127.0.0.1"] } + "A test-1" = { records = [module.vm-test1.internal_ip] } + "A test-2" = { records = [module.vm-test2.internal_ip] } } } @@ -183,9 +183,9 @@ module "dns-api" { domain = "googleapis.com." client_networks = [module.vpc.self_link] recordsets = { - "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } - "A private" = { ttl = 300, records = local.vips.private } - "A restricted" = { ttl = 300, records = local.vips.restricted } + "CNAME *" = { records = ["private.googleapis.com."] } + "A private" = { records = local.vips.private } + "A restricted" = { records = local.vips.restricted } } } diff --git a/blueprints/networking/onprem-google-access-dns/versions.tf b/blueprints/networking/onprem-google-access-dns/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/networking/onprem-google-access-dns/versions.tf +++ b/blueprints/networking/onprem-google-access-dns/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/networking/private-cloud-function-from-onprem/main.tf b/blueprints/networking/private-cloud-function-from-onprem/main.tf index 528cabe0..e2f23f1f 100644 --- a/blueprints/networking/private-cloud-function-from-onprem/main.tf +++ b/blueprints/networking/private-cloud-function-from-onprem/main.tf @@ -218,7 +218,7 @@ module "private-dns-onprem" { domain = "${var.region}-${module.project.project_id}.cloudfunctions.net." client_networks = [module.vpc-onprem.self_link] recordsets = { - "A " = { ttl = 300, records = [module.addresses.psc_addresses[local.psc_name].address] } + "A " = { records = [module.addresses.psc_addresses[local.psc_name].address] } } } diff --git a/blueprints/networking/private-cloud-function-from-onprem/versions.tf b/blueprints/networking/private-cloud-function-from-onprem/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/networking/private-cloud-function-from-onprem/versions.tf +++ b/blueprints/networking/private-cloud-function-from-onprem/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/networking/shared-vpc-gke/main.tf b/blueprints/networking/shared-vpc-gke/main.tf index 47b2b533..59d07d2d 100644 --- a/blueprints/networking/shared-vpc-gke/main.tf +++ b/blueprints/networking/shared-vpc-gke/main.tf @@ -157,8 +157,8 @@ module "host-dns" { domain = "example.com." 
client_networks = [module.vpc-shared.self_link] recordsets = { - "A localhost" = { ttl = 300, records = ["127.0.0.1"] } - "A bastion" = { ttl = 300, records = [module.vm-bastion.internal_ip] } + "A localhost" = { records = ["127.0.0.1"] } + "A bastion" = { records = [module.vm-bastion.internal_ip] } } } @@ -219,11 +219,13 @@ module "cluster-1" { } module "cluster-1-nodepool-1" { - source = "../../../modules/gke-nodepool" - count = var.cluster_create ? 1 : 0 - name = "nodepool-1" - project_id = module.project-svc-gke.project_id - location = module.cluster-1.0.location - cluster_name = module.cluster-1.0.name - service_account = {} + source = "../../../modules/gke-nodepool" + count = var.cluster_create ? 1 : 0 + name = "nodepool-1" + project_id = module.project-svc-gke.project_id + location = module.cluster-1.0.location + cluster_name = module.cluster-1.0.name + service_account = { + create = true + } } diff --git a/blueprints/networking/shared-vpc-gke/versions.tf b/blueprints/networking/shared-vpc-gke/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/networking/shared-vpc-gke/versions.tf +++ b/blueprints/networking/shared-vpc-gke/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/third-party-solutions/README.md b/blueprints/third-party-solutions/README.md index 10b7ced2..c7cbec73 100644 --- a/blueprints/third-party-solutions/README.md +++ b/blueprints/third-party-solutions/README.md @@ -7,3 +7,11 @@ The blueprints in this folder show how to automate installation of specific thir ### OpenShift cluster bootstrap on Shared VPC This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks. + +
+ +### Wordpress deployment on Cloud Run + +This [example](./wordpress/cloudrun/) shows how to deploy a new, functioning Wordpress website exposed to the public internet via Cloud Run and Cloud SQL, with minimal technical overhead. + +
diff --git a/blueprints/third-party-solutions/openshift/tf/versions.tf b/blueprints/third-party-solutions/openshift/tf/versions.tf index b1c8c910..286536a6 100644 --- a/blueprints/third-party-solutions/openshift/tf/versions.tf +++ b/blueprints/third-party-solutions/openshift/tf/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/blueprints/third-party-solutions/wordpress/cloudrun/README.md b/blueprints/third-party-solutions/wordpress/cloudrun/README.md index ee1e2d90..4ca10796 100644 --- a/blueprints/third-party-solutions/wordpress/cloudrun/README.md +++ b/blueprints/third-party-solutions/wordpress/cloudrun/README.md @@ -36,11 +36,11 @@ If `project_create` is left to null, the identity performing the deployment need If you want to deploy from your Cloud Shell, click on the image below, sign in if required and when the prompt appears, click on “confirm”. -[![Open Cloudshell](images/button.png)](https://shell.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fcloud-foundation-fabric&cloudshell_workspace=blueprints%2Fthird-party-solutions%2Fwordpress%2Fcloudrun) - +[![Open Cloudshell](../../../../assets/images/cloud-shell-button.png)](https://shell.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fcloud-foundation-fabric&cloudshell_workspace=blueprints%2Fthird-party-solutions%2Fwordpress%2Fcloudrun) Otherwise, in your console of choice: -``` {shell} + +```bash git clone https://github.com/GoogleCloudPlatform/cloud-foundation-fabric ``` @@ -70,6 +70,7 @@ Once you have the required information, head back to your cloned repository. Mak Configure the Terraform variables in your `terraform.tfvars` file. See [terraform.tfvars.sample](terraform.tfvars.sample) as starting point - just copy it to `terraform.tfvars` and edit the latter. See the variables documentation below. **Notes**: + 1. If you will want to change your admin password later on, please note that it will only work in the admin interface of Wordpress, but not with redeploying with Terraform, since Wordpress writes that password into the database upon installation and ignores the environment variables (that you can change with Terraform) after that. 2. If you have the [domain restriction org. policy](https://cloud.google.com/resource-manager/docs/organization-policy/restricting-domains) on your organization, you have to edit the `cloud_run_invoker` variable and give it a value that will be accepted in accordance to your policy. @@ -81,22 +82,27 @@ Initialize your Terraform environment and deploy the resources: terraform init terraform apply ``` + The resource creation will take a few minutes. **Note**: you might get the following error (or a similar one): + ``` {shell} │ Error: resource is in failed state "Ready:False", message: Revision '...' is not ready and cannot serve traffic.│ ``` + You might try to reapply at this point, the Cloud Run service just needs several minutes. ### Step 4: Use the created resources Upon completion, you will see the output with the values for the Cloud Run service and the user and password to access the `/admin` part of the website. 
You can also view it later with: + ``` {shell} terraform output # or for the concrete variable: terraform output cloud_run_service ``` + 1. Open your browser at the URL that you get with that last command, and you will see your Wordpress installation. 2. Add "/admin" in the end of the URL and log in to the admin interface, using the outputs "wp_user" and "wp_password". diff --git a/blueprints/third-party-solutions/wordpress/cloudrun/images/button.png b/blueprints/third-party-solutions/wordpress/cloudrun/images/button.png deleted file mode 100644 index 21a3f3de..00000000 Binary files a/blueprints/third-party-solutions/wordpress/cloudrun/images/button.png and /dev/null differ diff --git a/default-versions.tf b/default-versions.tf index 6862241d..286536a6 100644 --- a/default-versions.tf +++ b/default-versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" + version = ">= 4.40.0" # tftest } } } diff --git a/fast/stages/00-bootstrap/automation.tf b/fast/stages/00-bootstrap/automation.tf index cf48d9cd..13eb68f1 100644 --- a/fast/stages/00-bootstrap/automation.tf +++ b/fast/stages/00-bootstrap/automation.tf @@ -72,6 +72,7 @@ module "automation-project" { "essentialcontacts.googleapis.com", "iam.googleapis.com", "iamcredentials.googleapis.com", + "orgpolicy.googleapis.com", "pubsub.googleapis.com", "servicenetworking.googleapis.com", "serviceusage.googleapis.com", diff --git a/fast/stages/01-resman/branch-sandbox.tf b/fast/stages/01-resman/branch-sandbox.tf index 7ed154aa..84995c15 100644 --- a/fast/stages/01-resman/branch-sandbox.tf +++ b/fast/stages/01-resman/branch-sandbox.tf @@ -32,16 +32,9 @@ module "branch-sandbox-folder" { "roles/resourcemanager.folderAdmin" = [module.branch-sandbox-sa.0.iam_email] "roles/resourcemanager.projectCreator" = [module.branch-sandbox-sa.0.iam_email] } - policy_boolean = { - "constraints/sql.restrictPublicIp" = false - } - policy_list = { - "constraints/compute.vmExternalIpAccess" = { - inherit_from_parent = false - suggested_value = null - status = true - values = [] - } + org_policies = { + "constraints/sql.restrictPublicIp" = { enforce = false } + "constraints/compute.vmExternalIpAccess" = { allow = { all = true } } } tag_bindings = { context = try( diff --git a/fast/stages/01-resman/organization.tf b/fast/stages/01-resman/organization.tf index 6596f9c0..40a789ee 100644 --- a/fast/stages/01-resman/organization.tf +++ b/fast/stages/01-resman/organization.tf @@ -18,18 +18,11 @@ locals { - list_allow = { - inherit_from_parent = false - suggested_value = null - status = true - values = [] - } - list_deny = { - inherit_from_parent = false - suggested_value = null - status = false - values = [] - } + all_drs_domains = concat( + [var.organization.customer_id], + try(local.policy_configs.allowed_policy_member_domains, []) + ) + policy_configs = ( var.organization_policy_configs == null ? 
{} @@ -74,74 +67,54 @@ module "organization" { } : {} ) # sample subset of useful organization policies, edit to suit requirements - policy_boolean = { - # "constraints/cloudfunctions.requireVPCConnector" = true - # "constraints/compute.disableGuestAttributesAccess" = true - # "constraints/compute.disableInternetNetworkEndpointGroup" = true - # "constraints/compute.disableNestedVirtualization" = true - # "constraints/compute.disableSerialPortAccess" = true - "constraints/compute.requireOsLogin" = true - # "constraints/compute.restrictXpnProjectLienRemoval" = true - "constraints/compute.skipDefaultNetworkCreation" = true - # "constraints/compute.setNewProjectDefaultToZonalDNSOnly" = true - "constraints/iam.automaticIamGrantsForDefaultServiceAccounts" = true - "constraints/iam.disableServiceAccountKeyCreation" = true - # "constraints/iam.disableServiceAccountKeyUpload" = true - "constraints/sql.restrictPublicIp" = true - "constraints/sql.restrictAuthorizedNetworks" = true - "constraints/storage.uniformBucketLevelAccess" = true - } - policy_list = { - # "constraints/cloudfunctions.allowedIngressSettings" = merge( - # local.list_allow, { values = ["is:ALLOW_INTERNAL_ONLY"] } - # ) - # "constraints/cloudfunctions.allowedVpcConnectorEgressSettings" = merge( - # local.list_allow, { values = ["is:PRIVATE_RANGES_ONLY"] } - # ) - "constraints/compute.restrictLoadBalancerCreationForTypes" = merge( - local.list_allow, { values = ["in:INTERNAL"] } - ) - "constraints/compute.vmExternalIpAccess" = local.list_deny - "constraints/iam.allowedPolicyMemberDomains" = merge( - local.list_allow, { - values = concat( - [var.organization.customer_id], - try(local.policy_configs.allowed_policy_member_domains, []) - ) - }) - "constraints/run.allowedIngress" = merge( - local.list_allow, { values = ["is:internal"] } - ) - # "constraints/run.allowedVPCEgress" = merge( - # local.list_allow, { values = ["is:private-ranges-only"] } - # ) - # "constraints/compute.restrictCloudNATUsage" = local.list_deny - # "constraints/compute.restrictDedicatedInterconnectUsage" = local.list_deny - # "constraints/compute.restrictPartnerInterconnectUsage" = local.list_deny - # "constraints/compute.restrictProtocolForwardingCreationForTypes" = local.list_deny - # "constraints/compute.restrictSharedVpcHostProjects" = local.list_deny - # "constraints/compute.restrictSharedVpcSubnetworks" = local.list_deny - # "constraints/compute.restrictVpcPeering" = local.list_deny - # "constraints/compute.restrictVpnPeerIPs" = local.list_deny - # "constraints/compute.vmCanIpForward" = local.list_deny - # "constraints/gcp.resourceLocations" = { - # inherit_from_parent = false - # suggested_value = null - # status = true - # values = local.allowed_regions + + org_policies = { + "compute.disableGuestAttributesAccess" = { enforce = true } + "compute.requireOsLogin" = { enforce = true } + "compute.restrictLoadBalancerCreationForTypes" = { allow = { values = ["in:INTERNAL"] } } + "compute.skipDefaultNetworkCreation" = { enforce = true } + "compute.vmExternalIpAccess" = { deny = { all = true } } + "iam.allowedPolicyMemberDomains" = { allow = { values = local.all_drs_domains } } + "iam.automaticIamGrantsForDefaultServiceAccounts" = { enforce = true } + "iam.disableServiceAccountKeyCreation" = { enforce = true } + "iam.disableServiceAccountKeyUpload" = { enforce = true } + "run.allowedIngress" = { allow = { values = ["is:internal"] } } + "sql.restrictAuthorizedNetworks" = { enforce = true } + "sql.restrictPublicIp" = { enforce = true } + 
"storage.uniformBucketLevelAccess" = { enforce = true } + # "cloudfunctions.allowedIngressSettings" = { + # allow = { values = ["is:ALLOW_INTERNAL_ONLY"] } # } - # https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#restrict - # "constraints/iam.workloadIdentityPoolProviders" = merge( - # local.list_allow, { values = [ - # for k, v in coalesce(var.automation.federated_identity_providers, {}) : - # v.issuer_uri - # ] } - # ) - # "constraints/iam.workloadIdentityPoolAwsAccounts" = merge( - # local.list_allow, { values = [ - # - # ] } - # ) + # "cloudfunctions.allowedVpcConnectorEgressSettings" = { + # allow = { values = ["is:PRIVATE_RANGES_ONLY"] } + # } + # "cloudfunctions.requireVPCConnector" = { enforce = true } + # "compute.disableInternetNetworkEndpointGroup" = { enforce = true } + # "compute.disableNestedVirtualization" = { enforce = true } + # "compute.disableSerialPortAccess" = { enforce = true } + # "compute.restrictCloudNATUsage" = { deny = { all = true }} + # "compute.restrictDedicatedInterconnectUsage" = { deny = { all = true }} + # "compute.restrictPartnerInterconnectUsage" = { deny = { all = true }} + # "compute.restrictProtocolForwardingCreationForTypes" = { deny = { all = true }} + # "compute.restrictSharedVpcHostProjects" = { deny = { all = true }} + # "compute.restrictSharedVpcSubnetworks" = { deny = { all = true }} + # "compute.restrictVpcPeering" = { deny = { all = true }} + # "compute.restrictVpnPeerIPs" = { deny = { all = true }} + # "compute.restrictXpnProjectLienRemoval" = { enforce = true } + # "compute.setNewProjectDefaultToZonalDNSOnly" = { enforce = true } + # "compute.vmCanIpForward" = { deny = { all = true }} + # "gcp.resourceLocations" = { + # allow = { values = local.allowed_regions } + # } + # "iam.workloadIdentityPoolProviders" = { + # allow = { + # values = [ + # for k, v in coalesce(var.automation.federated_identity_providers, {}) : + # v.issuer_uri + # ] + # } + # } + # "run.allowedVPCEgress" = { allow = { values = ["is:private-ranges-only"] } } } tags = { (var.tag_names.context) = { diff --git a/fast/stages/02-networking-nva/README.md b/fast/stages/02-networking-nva/README.md index 84c236cf..cddfddaa 100644 --- a/fast/stages/02-networking-nva/README.md +++ b/fast/stages/02-networking-nva/README.md @@ -172,6 +172,13 @@ DNS configuration is further centralized by leveraging peering zones, so that - the hub/landing Cloud DNS hosts configurations for on-prem forwarding, Google API domains, and the top-level private zone/s (e.g. gcp.example.com) - the spokes Cloud DNS host configurations for the environment-specific domains (e.g. prod.gcp.example.com), which are bound to the hub/landing leveraging [cross-project binding](https://cloud.google.com/dns/docs/zones/zones-overview#cross-project_binding); a peering zone for the `.` (root) zone is then created on each spoke, delegating all DNS resolution to hub/landing. 
+- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed to the VPN tunnels from on-premises, and the following names should be configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-nva/dns-dev.tf b/fast/stages/02-networking-nva/dns-dev.tf index 08c99486..4eb472a1 100644 --- a/fast/stages/02-networking-nva/dns-dev.tf +++ b/fast/stages/02-networking-nva/dns-dev.tf @@ -26,7 +26,7 @@ module "dev-dns-private-zone" { domain = "dev.gcp.example.com." client_networks = [module.landing-trusted-vpc.self_link, module.landing-untrusted-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } diff --git a/fast/stages/02-networking-nva/dns-landing.tf b/fast/stages/02-networking-nva/dns-landing.tf index f177dcda..40090279 100644 --- a/fast/stages/02-networking-nva/dns-landing.tf +++ b/fast/stages/02-networking-nva/dns-landing.tf @@ -55,11 +55,11 @@ module "gcp-example-dns-private-zone" { module.landing-trusted-vpc.self_link ] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } -# Google API zone to trigger Private Access +# Google APIs module "googleapis-private-zone" { source = "../../../modules/dns" @@ -72,12 +72,84 @@ module "googleapis-private-zone" { module.landing-trusted-vpc.self_link ] recordsets = { - "A private" = { type = "A", ttl = 300, records = [ + "A private" = { records = [ "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" ] } - "A restricted" = { type = "A", ttl = 300, records = [ + "A restricted" = { records = [ "199.36.153.4", "199.36.153.5", "199.36.153.6", "199.36.153.7" ] } - "CNAME *" = { type = "CNAME", ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } + } +} + +module "gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "packages-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A packages.cloud.google.com." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A pkg.dev." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [ + module.landing-untrusted-vpc.self_link, + module.landing-trusted-vpc.self_link + ] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } } } diff --git a/fast/stages/02-networking-nva/dns-prod.tf b/fast/stages/02-networking-nva/dns-prod.tf index 335f1508..b54609df 100644 --- a/fast/stages/02-networking-nva/dns-prod.tf +++ b/fast/stages/02-networking-nva/dns-prod.tf @@ -26,7 +26,7 @@ module "prod-dns-private-zone" { domain = "prod.gcp.example.com." client_networks = [module.landing-trusted-vpc.self_link, module.landing-untrusted-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } diff --git a/fast/stages/02-networking-nva/nva.tf b/fast/stages/02-networking-nva/nva.tf index 4e70d02f..f4f7b9e5 100644 --- a/fast/stages/02-networking-nva/nva.tf +++ b/fast/stages/02-networking-nva/nva.tf @@ -15,7 +15,7 @@ */ locals { - # routing_config should be aligned to the NVA network interfaces - i.e. + # routing_config should be aligned to the NVA network interfaces - i.e. # local.routing_config[0] sets up the first interface, and so on. routing_config = [ { @@ -94,27 +94,21 @@ module "nva-template" { } module "nva-mig" { - for_each = local.nva_locality - source = "../../../modules/compute-mig" - project_id = module.landing-project.project_id - regional = true - location = each.value.region - name = "nva-cos-${each.value.trigram}-${each.value.zone}" - target_size = 1 - # FIXME: cycle - # auto_healing_policies = { - # health_check = module.nva-mig[each.key].health_check.self_link - # initial_delay_sec = 30 - # } - health_check_config = { - type = "tcp" - check = { port = 22 } - config = {} - logging = true + for_each = local.nva_locality + source = "../../../modules/compute-mig" + project_id = module.landing-project.project_id + location = each.value.region + name = "nva-cos-${each.value.trigram}-${each.value.zone}" + instance_template = module.nva-template[each.key].template.self_link + target_size = 1 + auto_healing_policies = { + initial_delay_sec = 30 } - default_version = { - instance_template = module.nva-template[each.key].template.self_link - name = "default" + health_check_config = { + enable_logging = true + tcp = { + port = 22 + } } } @@ -126,16 +120,20 @@ module "ilb-nva-untrusted" { name = "nva-untrusted-${each.value.0}" service_label = var.prefix global_access = true - network = module.landing-untrusted-vpc.self_link - subnetwork = module.landing-untrusted-vpc.subnet_self_links["${each.key}/landing-untrusted-default-${each.value.0}"] - backends = [for key, _ in local.nva_locality : - { - failover = false - group = module.nva-mig[key].group_manager.instance_group - balancing_mode = "CONNECTION" - } if local.nva_locality[key].region == each.key] + vpc_config = { + network = module.landing-untrusted-vpc.self_link + subnetwork = module.landing-untrusted-vpc.subnet_self_links["${each.key}/landing-untrusted-default-${each.value.0}"] + } + backends = [ + for key, _ in local.nva_locality : { 
+ group = module.nva-mig[key].group_manager.instance_group + } if local.nva_locality[key].region == each.key + ] health_check_config = { - type = "tcp", check = { port = 22 }, config = {}, logging = false + enable_logging = true + tcp = { + port = 22 + } } } @@ -148,16 +146,20 @@ module "ilb-nva-trusted" { name = "nva-trusted-${each.value.0}" service_label = var.prefix global_access = true - network = module.landing-trusted-vpc.self_link - subnetwork = module.landing-trusted-vpc.subnet_self_links["${each.key}/landing-trusted-default-${each.value.0}"] - backends = [for key, _ in local.nva_locality : - { - failover = false - group = module.nva-mig[key].group_manager.instance_group - balancing_mode = "CONNECTION" - } if local.nva_locality[key].region == each.key] + vpc_config = { + network = module.landing-trusted-vpc.self_link + subnetwork = module.landing-trusted-vpc.subnet_self_links["${each.key}/landing-trusted-default-${each.value.0}"] + } + backends = [ + for key, _ in local.nva_locality : { + group = module.nva-mig[key].group_manager.instance_group + } if local.nva_locality[key].region == each.key + ] health_check_config = { - type = "tcp", check = { port = 22 }, config = {}, logging = false + enable_logging = true + tcp = { + port = 22 + } } } diff --git a/fast/stages/02-networking-peering/README.md b/fast/stages/02-networking-peering/README.md index 0e5c72a7..1dfdb9a5 100644 --- a/fast/stages/02-networking-peering/README.md +++ b/fast/stages/02-networking-peering/README.md @@ -102,6 +102,13 @@ DNS configuration is further centralized by leveraging peering zones, so that - the hub/landing Cloud DNS hosts configurations for on-prem forwarding, Google API domains, and the top-level private zone/s (e.g. gcp.example.com) - the spokes Cloud DNS host configurations for the environment-specific domains (e.g. prod.gcp.example.com), which are bound to the hub/landing leveraging [cross-project binding](https://cloud.google.com/dns/docs/zones/zones-overview#cross-project_binding); a peering zone for the `.` (root) zone is then created on each spoke, delegating all DNS resolution to hub/landing. +- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed on the VPN tunnels from on-prem, and the following names configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-peering/dns-dev.tf b/fast/stages/02-networking-peering/dns-dev.tf index aad50afc..03ae0122 100644 --- a/fast/stages/02-networking-peering/dns-dev.tf +++ b/fast/stages/02-networking-peering/dns-dev.tf @@ -26,7 +26,7 @@ module "dev-dns-private-zone" { domain = "dev.gcp.example.com." client_networks = [module.landing-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } diff --git a/fast/stages/02-networking-peering/dns-landing.tf b/fast/stages/02-networking-peering/dns-landing.tf index b1d766ab..7b97a8cf 100644 --- a/fast/stages/02-networking-peering/dns-landing.tf +++ b/fast/stages/02-networking-peering/dns-landing.tf @@ -46,11 +46,11 @@ module "gcp-example-dns-private-zone" { domain = "gcp.example.com." 
client_networks = [module.landing-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } -# Google API zone to trigger Private Access +# Google APIs module "googleapis-private-zone" { source = "../../../modules/dns" @@ -60,12 +60,72 @@ module "googleapis-private-zone" { domain = "googleapis.com." client_networks = [module.landing-vpc.self_link] recordsets = { - "A private" = { type = "A", ttl = 300, records = [ + "A private" = { records = [ "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" ] } - "A restricted" = { type = "A", ttl = 300, records = [ + "A restricted" = { records = [ "199.36.153.4", "199.36.153.5", "199.36.153.6", "199.36.153.7" ] } - "CNAME *" = { type = "CNAME", ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } + } +} + +module "gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "packages-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } } } diff --git a/fast/stages/02-networking-peering/dns-prod.tf b/fast/stages/02-networking-peering/dns-prod.tf index a4a916b4..5bb695fd 100644 --- a/fast/stages/02-networking-peering/dns-prod.tf +++ b/fast/stages/02-networking-peering/dns-prod.tf @@ -26,7 +26,7 @@ module "prod-dns-private-zone" { domain = "prod.gcp.example.com." 
client_networks = [module.landing-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } diff --git a/fast/stages/02-networking-separate-envs/README.md b/fast/stages/02-networking-separate-envs/README.md index 2329aad4..6fdb00cf 100644 --- a/fast/stages/02-networking-separate-envs/README.md +++ b/fast/stages/02-networking-separate-envs/README.md @@ -69,6 +69,13 @@ DNS often goes hand in hand with networking, especially on GCP where Cloud DNS z - on-prem to cloud via private zones for cloud-managed domains, and an [inbound policy](https://cloud.google.com/dns/docs/server-policies-overview#dns-server-policy-in) used as forwarding target or via delegation (requires some extra configuration) from on-prem DNS resolvers - cloud to on-prem via forwarding zones for the on-prem managed domains +- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed on the VPN tunnels from on-prem, and the following names configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-separate-envs/dns-dev.tf b/fast/stages/02-networking-separate-envs/dns-dev.tf index 355938b2..25adab5e 100644 --- a/fast/stages/02-networking-separate-envs/dns-dev.tf +++ b/fast/stages/02-networking-separate-envs/dns-dev.tf @@ -26,7 +26,7 @@ module "dev-dns-private-zone" { domain = "dev.gcp.example.com." client_networks = [module.dev-spoke-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } @@ -50,6 +50,8 @@ module "dev-reverse-10-dns-forwarding" { forwarders = { for ip in var.dns.dev : ip => null } } +# Google APIs + module "dev-googleapis-private-zone" { source = "../../../modules/dns" project_id = module.dev-spoke-project.project_id @@ -58,12 +60,72 @@ module "dev-googleapis-private-zone" { domain = "googleapis.com." client_networks = [module.dev-spoke-vpc.self_link] recordsets = { - "A private" = { type = "A", ttl = 300, records = [ + "A private" = { records = [ "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" ] } - "A restricted" = { type = "A", ttl = 300, records = [ + "A restricted" = { records = [ "199.36.153.4", "199.36.153.5", "199.36.153.6", "199.36.153.7" ] } - "CNAME *" = { type = "CNAME", ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } + } +} + +module "dev-gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "dev-packages-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." 
= { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "dev-pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "dev-pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.dev-spoke-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.dev-spoke-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } } } diff --git a/fast/stages/02-networking-separate-envs/dns-prod.tf b/fast/stages/02-networking-separate-envs/dns-prod.tf index 3dc7ff43..47c8cdca 100644 --- a/fast/stages/02-networking-separate-envs/dns-prod.tf +++ b/fast/stages/02-networking-separate-envs/dns-prod.tf @@ -26,7 +26,7 @@ module "prod-dns-private-zone" { domain = "prod.gcp.example.com." client_networks = [module.prod-spoke-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } @@ -50,6 +50,7 @@ module "prod-reverse-10-dns-forwarding" { forwarders = { for ip in var.dns.prod : ip => null } } +# Google APIs module "prod-googleapis-private-zone" { source = "../../../modules/dns" @@ -59,12 +60,72 @@ module "prod-googleapis-private-zone" { domain = "googleapis.com." client_networks = [module.prod-spoke-vpc.self_link] recordsets = { - "A private" = { type = "A", ttl = 300, records = [ + "A private" = { records = [ "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" ] } - "A restricted" = { type = "A", ttl = 300, records = [ + "A restricted" = { records = [ "199.36.153.4", "199.36.153.5", "199.36.153.6", "199.36.153.7" ] } - "CNAME *" = { type = "CNAME", ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } + } +} + +module "prod-gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "prod-packages-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "prod-pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." 
+ client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "prod-pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.prod-spoke-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.prod-spoke-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } } } diff --git a/fast/stages/02-networking-vpn/README.md b/fast/stages/02-networking-vpn/README.md index 010b2246..783b11fb 100644 --- a/fast/stages/02-networking-vpn/README.md +++ b/fast/stages/02-networking-vpn/README.md @@ -108,6 +108,13 @@ DNS configuration is further centralized by leveraging peering zones, so that - the hub/landing Cloud DNS hosts configurations for on-prem forwarding, Google API domains, and the top-level private zone/s (e.g. gcp.example.com) - the spokes Cloud DNS host configurations for the environment-specific domains (e.g. prod.gcp.example.com), which are bound to the hub/landing leveraging [cross-project binding](https://cloud.google.com/dns/docs/zones/zones-overview#cross-project_binding); a peering zone for the `.` (root) zone is then created on each spoke, delegating all DNS resolution to hub/landing. +- Private Google Access is enabled for a selection of the [supported domains](https://cloud.google.com/vpc/docs/configure-private-google-access#domain-options), namely + - `private.googleapis.com` + - `restricted.googleapis.com` + - `gcr.io` + - `packages.cloud.google.com` + - `pkg.dev` + - `pki.goog` To complete the configuration, the 35.199.192.0/19 range should be routed on the VPN tunnels from on-prem, and the following names configured for DNS forwarding to cloud: diff --git a/fast/stages/02-networking-vpn/dns-dev.tf b/fast/stages/02-networking-vpn/dns-dev.tf index aad50afc..03ae0122 100644 --- a/fast/stages/02-networking-vpn/dns-dev.tf +++ b/fast/stages/02-networking-vpn/dns-dev.tf @@ -26,7 +26,7 @@ module "dev-dns-private-zone" { domain = "dev.gcp.example.com." client_networks = [module.landing-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } diff --git a/fast/stages/02-networking-vpn/dns-landing.tf b/fast/stages/02-networking-vpn/dns-landing.tf index b1d766ab..7b97a8cf 100644 --- a/fast/stages/02-networking-vpn/dns-landing.tf +++ b/fast/stages/02-networking-vpn/dns-landing.tf @@ -46,11 +46,11 @@ module "gcp-example-dns-private-zone" { domain = "gcp.example.com." client_networks = [module.landing-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } -# Google API zone to trigger Private Access +# Google APIs module "googleapis-private-zone" { source = "../../../modules/dns" @@ -60,12 +60,72 @@ module "googleapis-private-zone" { domain = "googleapis.com." 
client_networks = [module.landing-vpc.self_link] recordsets = { - "A private" = { type = "A", ttl = 300, records = [ + "A private" = { records = [ "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" ] } - "A restricted" = { type = "A", ttl = 300, records = [ + "A restricted" = { records = [ "199.36.153.4", "199.36.153.5", "199.36.153.6", "199.36.153.7" ] } - "CNAME *" = { type = "CNAME", ttl = 300, records = ["private.googleapis.com."] } + "CNAME *" = { records = ["private.googleapis.com."] } + } +} + +module "gcrio-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "gcr-io" + domain = "gcr.io." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A gcr.io." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "packages-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "packages-cloud" + domain = "packages.cloud.google.com." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A packages.cloud.google.com." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkgdev-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pkg-dev" + domain = "pkg.dev." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pkg.dev." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } + } +} + +module "pkigoog-private-zone" { + source = "../../../modules/dns" + project_id = module.landing-project.project_id + type = "private" + name = "pki-goog" + domain = "pki.goog." + client_networks = [module.landing-vpc.self_link] + recordsets = { + "A pki.goog." = { ttl = 300, records = [ + "199.36.153.8", "199.36.153.9", "199.36.153.10", "199.36.153.11" + ] } + "CNAME *" = { ttl = 300, records = ["private.googleapis.com."] } } } diff --git a/fast/stages/02-networking-vpn/dns-prod.tf b/fast/stages/02-networking-vpn/dns-prod.tf index a4a916b4..5bb695fd 100644 --- a/fast/stages/02-networking-vpn/dns-prod.tf +++ b/fast/stages/02-networking-vpn/dns-prod.tf @@ -26,7 +26,7 @@ module "prod-dns-private-zone" { domain = "prod.gcp.example.com." client_networks = [module.landing-vpc.self_link] recordsets = { - "A localhost" = { type = "A", ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } } } diff --git a/fast/stages/03-gke-multitenant/dev/README.md b/fast/stages/03-gke-multitenant/dev/README.md index ac4e03d3..f3abf494 100644 --- a/fast/stages/03-gke-multitenant/dev/README.md +++ b/fast/stages/03-gke-multitenant/dev/README.md @@ -142,21 +142,21 @@ terraform apply |---|---|:---:|:---:|:---:|:---:| | [automation](variables.tf#L21) | Automation resources created by the bootstrap stage. | object({…}) | ✓ | | 00-bootstrap | | [billing_account](variables.tf#L29) | Billing account id and organization id ('nnnnnnnn' or null). | object({…}) | ✓ | | 00-bootstrap | -| [folder_ids](variables.tf#L146) | Folders to be used for the networking resources in folders/nnnnnnnnnnn format. If null, folder will be created. 
| object({…}) | ✓ | | 01-resman | -| [host_project_ids](variables.tf#L168) | Host project for the shared VPC. | object({…}) | ✓ | | 02-networking | -| [prefix](variables.tf#L210) | Prefix used for resources that need unique names. | string | ✓ | | | -| [vpc_self_links](variables.tf#L222) | Self link for the shared VPC. | object({…}) | ✓ | | 02-networking | -| [clusters](variables.tf#L38) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | | -| [fleet_configmanagement_clusters](variables.tf#L83) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | | -| [fleet_configmanagement_templates](variables.tf#L91) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | | -| [fleet_features](variables.tf#L126) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | | -| [fleet_workload_identity](variables.tf#L139) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | | -| [group_iam](variables.tf#L154) | Project-level authoritative IAM bindings for groups in {GROUP_EMAIL => [ROLES]} format. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | | -| [iam](variables.tf#L161) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | -| [labels](variables.tf#L176) | Project-level labels. | map(string) | | {} | | -| [nodepools](variables.tf#L182) | Nodepools configuration. Refer to the gke-nodepool module for type details. | map(map(object({…}))) | | {} | | -| [outputs_location](variables.tf#L204) | Path where providers, tfvars files, and lists for the following stages are written. Leave empty to disable. | string | | null | | -| [project_services](variables.tf#L215) | Additional project services to enable. | list(string) | | [] | | +| [folder_ids](variables.tf#L149) | Folders to be used for the networking resources in folders/nnnnnnnnnnn format. If null, folder will be created. | object({…}) | ✓ | | 01-resman | +| [host_project_ids](variables.tf#L171) | Host project for the shared VPC. | object({…}) | ✓ | | 02-networking | +| [prefix](variables.tf#L213) | Prefix used for resources that need unique names. | string | ✓ | | | +| [vpc_self_links](variables.tf#L225) | Self link for the shared VPC. | object({…}) | ✓ | | 02-networking | +| [clusters](variables.tf#L38) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | | +| [fleet_configmanagement_clusters](variables.tf#L86) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | | +| [fleet_configmanagement_templates](variables.tf#L94) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | | +| [fleet_features](variables.tf#L129) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | | +| [fleet_workload_identity](variables.tf#L142) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. 
| bool | | false | | +| [group_iam](variables.tf#L157) | Project-level authoritative IAM bindings for groups in {GROUP_EMAIL => [ROLES]} format. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | | +| [iam](variables.tf#L164) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | +| [labels](variables.tf#L179) | Project-level labels. | map(string) | | {} | | +| [nodepools](variables.tf#L185) | Nodepools configuration. Refer to the gke-nodepool module for type details. | map(map(object({…}))) | | {} | | +| [outputs_location](variables.tf#L207) | Path where providers, tfvars files, and lists for the following stages are written. Leave empty to disable. | string | | null | | +| [project_services](variables.tf#L218) | Additional project services to enable. | list(string) | | [] | | ## Outputs diff --git a/fast/stages/03-gke-multitenant/dev/variables.tf b/fast/stages/03-gke-multitenant/dev/variables.tf index 1a17da4b..9c5a1d38 100644 --- a/fast/stages/03-gke-multitenant/dev/variables.tf +++ b/fast/stages/03-gke-multitenant/dev/variables.tf @@ -55,9 +55,12 @@ variable "clusters" { recurring_window = null maintenance_exclusion = [] }) - max_pods_per_node = optional(number, 110) - min_master_version = optional(string) - monitoring_config = optional(list(string), ["SYSTEM_COMPONENTS"]) + max_pods_per_node = optional(number, 110) + min_master_version = optional(string) + monitoring_config = optional(object({ + enable_components = optional(list(string), ["SYSTEM_COMPONENTS"]) + managed_prometheus = optional(bool) + })) node_locations = optional(list(string)) private_cluster_config = optional(any) release_channel = optional(string) diff --git a/fast/stages/03-project-factory/dev/data/projects/project.yaml.sample b/fast/stages/03-project-factory/dev/data/projects/project.yaml.sample index 13a8f5f5..88ba0bf5 100644 --- a/fast/stages/03-project-factory/dev/data/projects/project.yaml.sample +++ b/fast/stages/03-project-factory/dev/data/projects/project.yaml.sample @@ -48,15 +48,15 @@ labels: # [opt] Org policy overrides defined at project level org_policies: - policy_boolean: - constraints/compute.disableGuestAttributesAccess: true - policy_list: - constraints/compute.trustedImageProjects: - inherit_from_parent: null - status: true - suggested_value: null + constraints/compute.disableGuestAttributesAccess: + enforce: true + constraints/compute.trustedImageProjects: + allow: values: - projects/fast-dev-iac-core-0 + constraints/compute.vmExternalIpAccess: + deny: + all: true # [opt] Service account to create for the project and their roles on the project # in name => [roles] format diff --git a/modules/README.md b/modules/README.md index 92cf25fc..9995c4eb 100644 --- a/modules/README.md +++ b/modules/README.md @@ -13,11 +13,12 @@ These modules are not necessarily backward compatible. Changes breaking compatib These modules are used in the examples included in this repository. If you are using any of those examples in your own Terraform configuration, make sure that you are using the same version for all the modules, and switch module sources to GitHub format using references. The recommended approach to working with Fabric modules is the following: - Fork the repository and own the fork. This will allow you to: - - Evolve the existing modules. - - Create your own modules. - - Sync from the upstream repository to get all the updates. - + - Evolve the existing modules. + - Create your own modules. 
+ - Sync from the upstream repository to get all the updates. + - Use GitHub sources with refs to reference the modules. See an example below: + ```terraform module "project" { source = "github.com/GoogleCloudPlatform/cloud-foundation-fabric//modules/project?ref=v13.0.0" @@ -30,62 +31,64 @@ These modules are used in the examples included in this repository. If you are u ## Foundational modules - [billing budget](./billing-budget) +- [Cloud Identity group](./cloud-identity-group/) - [folder](./folder) +- [service accounts](./iam-service-account) - [logging bucket](./logging-bucket) - [organization](./organization) - [project](./project) - [projects-data-source](./projects-data-source) -- [service account](./iam-service-account) -- [organization policy](./organization-policy) ## Networking modules -- [address reservation](./net-address) -- [Cloud DNS](./dns) -- [Cloud NAT](./net-cloudnat) +- [DNS](./dns) - [Cloud Endpoints](./endpoints) -- [L4 Internal Load Balancer](./net-ilb) -- [Service Directory](./service-directory) +- [address reservation](./net-address) +- [NAT](./net-cloudnat) +- [Global Load Balancer (classic)](./net-glb/) +- [L4 ILB](./net-ilb) +- [L7 ILB](./net-ilb-l7) - [VPC](./net-vpc) - [VPC firewall](./net-vpc-firewall) - [VPC peering](./net-vpc-peering) -- [VPN static](./net-vpn-static) - [VPN dynamic](./net-vpn-dynamic) - [HA VPN](./net-vpn-ha) -- [ ] TODO: xLB modules +- [VPN static](./net-vpn-static) +- [Service Directory](./service-directory) ## Compute/Container -- [COS container](./cloud-config-container/onprem/) (coredns, mysql, onprem, squid) -- [GKE cluster](./gke-cluster) -- [GKE nodepool](./gke-nodepool) -- [GKE hub](./gke-hub) -- [Managed Instance Group](./compute-mig) - [VM/VM group](./compute-vm) +- [MIG](./compute-mig) +- [COS container](./cloud-config-container/cos-generic-metadata/) (coredns/mysql/nva/onprem/squid) +- [GKE cluster](./gke-cluster) +- [GKE hub](./gke-hub) +- [GKE nodepool](./gke-nodepool) ## Data - [BigQuery dataset](./bigquery-dataset) -- [Datafusion](./datafusion) -- [GCS](./gcs) -- [Pub/Sub](./pubsub) - [Bigtable instance](./bigtable-instance) - [Cloud SQL instance](./cloudsql-instance) - [Data Catalog Policy Tag](./data-catalog-policy-tag) +- [Datafusion](./datafusion) +- [GCS](./gcs) +- [Pub/Sub](./pubsub) ## Development -- [Artifact Registry](./artifact-registry) -- [Container Registry](./container-registry) -- [Source Repository](./source-repository) +- [API Gateway](./api-gateway) - [Apigee Organization](./apigee-organization) - [Apigee X Instance](./apigee-x-instance) -- [API Gateway](./api-gateway) +- [Artifact Registry](./artifact-registry) +- [Container Registry](./container-registry) +- [Cloud Source Repository](./source-repository) ## Security -- [Cloud KMS](./kms) -- [Secret Manager](./secret-manager) +- [Binauthz](./binauthz/) +- [KMS](./kms) +- [SecretManager](./secret-manager) - [VPC Service Control](./vpc-sc) ## Serverless diff --git a/modules/__experimental/net-neg/versions.tf b/modules/__experimental/net-neg/versions.tf index b1c8c910..286536a6 100644 --- a/modules/__experimental/net-neg/versions.tf +++ b/modules/__experimental/net-neg/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/api-gateway/versions.tf b/modules/api-gateway/versions.tf index 
b1c8c910..286536a6 100644 --- a/modules/api-gateway/versions.tf +++ b/modules/api-gateway/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/apigee-organization/README.md b/modules/apigee-organization/README.md index eceb4d13..150553a1 100644 --- a/modules/apigee-organization/README.md +++ b/modules/apigee-organization/README.md @@ -13,10 +13,16 @@ module "apigee-organization" { analytics_region = "us-central1" runtime_type = "CLOUD" authorized_network = "my-vpc" - apigee_environments = [ - "eval1", - "eval2" - ] + apigee_environments = { + eval1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + eval2 = { + api_proxy_type = "CONFIGURABLE" + deployment_type = "ARCHIVE" + } + } apigee_envgroups = { eval = { environments = [ @@ -42,12 +48,18 @@ module "apigee-organization" { runtime_type = "CLOUD" authorized_network = "my-vpc" database_encryption_key = "my-data-key" - apigee_environments = [ - "dev1", - "dev2", - "test1", - "test2" - ] + apigee_environments = { + dev1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + dev2 = { + api_proxy_type = "CONFIGURABLE" + deployment_type = "ARCHIVE" + } + test1 = {} + test2 = {} + } apigee_envgroups = { dev = { environments = [ @@ -80,10 +92,13 @@ module "apigee-organization" { project_id = "my-project" analytics_region = "us-central1" runtime_type = "HYBRID" - apigee_environments = [ - "eval1", - "eval2" - ] + apigee_environments = { + eval1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + eval2 = {} + } apigee_envgroups = { eval = { environments = [ @@ -105,15 +120,15 @@ module "apigee-organization" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [analytics_region](variables.tf#L17) | Analytics Region for the Apigee Organization (immutable). See https://cloud.google.com/apigee/docs/api-platform/get-started/install-cli. | string | ✓ | | -| [project_id](variables.tf#L61) | Project ID to host this Apigee organization (will also become the Apigee Org name). | string | ✓ | | -| [runtime_type](variables.tf#L66) | Apigee runtime type. Must be `CLOUD` or `HYBRID`. | string | ✓ | | +| [project_id](variables.tf#L72) | Project ID to host this Apigee organization (will also become the Apigee Org name). | string | ✓ | | +| [runtime_type](variables.tf#L77) | Apigee runtime type. Must be `CLOUD` or `HYBRID`. | string | ✓ | | | [apigee_envgroups](variables.tf#L22) | Apigee Environment Groups. | map(object({…})) | | {} | -| [apigee_environments](variables.tf#L31) | Apigee Environment Names. | list(string) | | [] | -| [authorized_network](variables.tf#L37) | VPC network self link (requires service network peering enabled (Used in Apigee X only). | string | | null | -| [billing_type](variables.tf#L75) | Billing type of the Apigee organization. | string | | null | -| [database_encryption_key](variables.tf#L43) | Cloud KMS key self link (e.g. `projects/foo/locations/us/keyRings/bar/cryptoKeys/baz`) used for encrypting the data that is stored and replicated across runtime instances (immutable, used in Apigee X only). | string | | null | -| [description](variables.tf#L49) | Description of the Apigee Organization. 
| string | | "Apigee Organization created by tf module" | -| [display_name](variables.tf#L55) | Display Name of the Apigee Organization. | string | | null | +| [apigee_environments](variables.tf#L31) | Apigee Environment Names. | map(object({…})) | | {} | +| [authorized_network](variables.tf#L48) | VPC network self link (requires service network peering enabled (Used in Apigee X only). | string | | null | +| [billing_type](variables.tf#L86) | Billing type of the Apigee organization. | string | | null | +| [database_encryption_key](variables.tf#L54) | Cloud KMS key self link (e.g. `projects/foo/locations/us/keyRings/bar/cryptoKeys/baz`) used for encrypting the data that is stored and replicated across runtime instances (immutable, used in Apigee X only). | string | | null | +| [description](variables.tf#L60) | Description of the Apigee Organization. | string | | "Apigee Organization created by tf module" | +| [display_name](variables.tf#L66) | Display Name of the Apigee Organization. | string | | null | ## Outputs diff --git a/modules/apigee-organization/main.tf b/modules/apigee-organization/main.tf index 148711a9..a498135b 100644 --- a/modules/apigee-organization/main.tf +++ b/modules/apigee-organization/main.tf @@ -15,6 +15,14 @@ */ locals { + env_pairs = flatten([ + for env_name, env in var.apigee_environments : { + api_proxy_type = env.api_proxy_type + deployment_type = env.deployment_type + env_name = env_name + } + ]) + env_envgroup_pairs = flatten([ for eg_name, eg in var.apigee_envgroups : [ for e in eg.environments : { @@ -37,9 +45,11 @@ resource "google_apigee_organization" "apigee_org" { } resource "google_apigee_environment" "apigee_env" { - for_each = toset(var.apigee_environments) - org_id = google_apigee_organization.apigee_org.id - name = each.key + for_each = { for env in local.env_pairs : env.env_name => env } + api_proxy_type = each.value.api_proxy_type + deployment_type = each.value.deployment_type + name = each.key + org_id = google_apigee_organization.apigee_org.id } resource "google_apigee_envgroup" "apigee_envgroup" { diff --git a/modules/apigee-organization/variables.tf b/modules/apigee-organization/variables.tf index b2b3eac9..b3d13e15 100644 --- a/modules/apigee-organization/variables.tf +++ b/modules/apigee-organization/variables.tf @@ -30,8 +30,19 @@ variable "apigee_envgroups" { variable "apigee_environments" { description = "Apigee Environment Names." - type = list(string) - default = [] + type = map(object({ + api_proxy_type = optional(string, "API_PROXY_TYPE_UNSPECIFIED") + deployment_type = optional(string, "DEPLOYMENT_TYPE_UNSPECIFIED") + })) + default = {} + validation { + condition = alltrue([for k, v in var.apigee_environments : contains(["API_PROXY_TYPE_UNSPECIFIED", "PROGRAMMABLE", "CONFIGURABLE"], v.api_proxy_type)]) + error_message = "Allowed values for api_proxy_type \"API_PROXY_TYPE_UNSPECIFIED\", \"PROGRAMMABLE\" or \"CONFIGURABLE\"." + } + validation { + condition = alltrue([for k, v in var.apigee_environments : contains(["DEPLOYMENT_TYPE_UNSPECIFIED", "PROXY", "ARCHIVE"], v.deployment_type)]) + error_message = "Allowed values for deployment_type \"DEPLOYMENT_TYPE_UNSPECIFIED\", \"PROXY\" or \"ARCHIVE\"." 
+ } } variable "authorized_network" { diff --git a/modules/apigee-organization/versions.tf b/modules/apigee-organization/versions.tf index b1c8c910..286536a6 100644 --- a/modules/apigee-organization/versions.tf +++ b/modules/apigee-organization/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/apigee-x-instance/versions.tf b/modules/apigee-x-instance/versions.tf index b1c8c910..286536a6 100644 --- a/modules/apigee-x-instance/versions.tf +++ b/modules/apigee-x-instance/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/artifact-registry/versions.tf b/modules/artifact-registry/versions.tf index b1c8c910..286536a6 100644 --- a/modules/artifact-registry/versions.tf +++ b/modules/artifact-registry/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/bigquery-dataset/versions.tf b/modules/bigquery-dataset/versions.tf index b1c8c910..286536a6 100644 --- a/modules/bigquery-dataset/versions.tf +++ b/modules/bigquery-dataset/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/bigtable-instance/versions.tf b/modules/bigtable-instance/versions.tf index b1c8c910..286536a6 100644 --- a/modules/bigtable-instance/versions.tf +++ b/modules/bigtable-instance/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/billing-budget/versions.tf b/modules/billing-budget/versions.tf index b1c8c910..286536a6 100644 --- a/modules/billing-budget/versions.tf +++ b/modules/billing-budget/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/binauthz/versions.tf b/modules/binauthz/versions.tf index b1c8c910..286536a6 100644 --- a/modules/binauthz/versions.tf +++ b/modules/binauthz/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/coredns/versions.tf b/modules/cloud-config-container/coredns/versions.tf 
index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/coredns/versions.tf +++ b/modules/cloud-config-container/coredns/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/cos-generic-metadata/versions.tf b/modules/cloud-config-container/cos-generic-metadata/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/cos-generic-metadata/versions.tf +++ b/modules/cloud-config-container/cos-generic-metadata/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/envoy-traffic-director/versions.tf b/modules/cloud-config-container/envoy-traffic-director/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/envoy-traffic-director/versions.tf +++ b/modules/cloud-config-container/envoy-traffic-director/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/mysql/versions.tf b/modules/cloud-config-container/mysql/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/mysql/versions.tf +++ b/modules/cloud-config-container/mysql/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/nginx-tls/versions.tf b/modules/cloud-config-container/nginx-tls/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/nginx-tls/versions.tf +++ b/modules/cloud-config-container/nginx-tls/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/nginx/versions.tf b/modules/cloud-config-container/nginx/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/nginx/versions.tf +++ b/modules/cloud-config-container/nginx/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/onprem/versions.tf b/modules/cloud-config-container/onprem/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/onprem/versions.tf +++ b/modules/cloud-config-container/onprem/versions.tf @@ -17,11 +17,11 @@ terraform { 
required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/simple-nva/README.md b/modules/cloud-config-container/simple-nva/README.md index 5014e9a3..3f5b0553 100644 --- a/modules/cloud-config-container/simple-nva/README.md +++ b/modules/cloud-config-container/simple-nva/README.md @@ -35,6 +35,13 @@ module "nva-cloud-config" { source = "../../../cloud-foundation-fabric/modules/cloud-config-container/simple-nva" enable_health_checks = true network_interfaces = local.network_interfaces + files = { + "/var/lib/cloud/scripts/per-boot/firewall-rules.sh" = { + content = file("./your_path/to/firewall-rules.sh") + owner = "root" + permissions = 0700 + } + } } # COS VM @@ -63,9 +70,10 @@ module "nva" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [network_interfaces](variables.tf#L29) | Network interfaces configuration. | list(object({…})) | ✓ | | +| [network_interfaces](variables.tf#L39) | Network interfaces configuration. | list(object({…})) | ✓ | | | [cloud_config](variables.tf#L17) | Cloud config template path. If null default will be used. | string | | null | -| [enable_health_checks](variables.tf#L23) | Configures routing to enable responses to health check probes. | bool | | false | +| [enable_health_checks](variables.tf#L33) | Configures routing to enable responses to health check probes. | bool | | false | +| [files](variables.tf#L23) | Map of extra files to create on the instance, path as key. Owner and permissions will use defaults if null. | map(object({…})) | | {} | | [test_instance](variables-instance.tf#L17) | Test/development instance attributes, leave null to skip creation. | object({…}) | | null | | [test_instance_defaults](variables-instance.tf#L30) | Test/development instance defaults used for optional configuration. If image is null, COS stable will be used. 
| object({…}) | | {…} | diff --git a/modules/cloud-config-container/simple-nva/cloud-config.yaml b/modules/cloud-config-container/simple-nva/cloud-config.yaml index 8d18a356..f1d71e82 100644 --- a/modules/cloud-config-container/simple-nva/cloud-config.yaml +++ b/modules/cloud-config-container/simple-nva/cloud-config.yaml @@ -22,17 +22,37 @@ write_files: content: | ${indent(6, data.content)} %{ endfor } + - path: /etc/systemd/system/routing.service + permissions: 0644 + owner: root + content: | + [Install] + WantedBy=multi-user.target + [Unit] + Description=Start routing + After=network-online.target + Wants=network-online.target + [Service] + ExecStart=/bin/sh -c "/var/run/nva/start-routing.sh" + - path: /var/run/nva/start-routing.sh + permissions: 0744 + owner: root + content: | + iptables --policy FORWARD ACCEPT +%{ for interface in network_interfaces ~} +%{ if enable_health_checks ~} + /var/run/nva/policy_based_routing.sh ${interface.name} +%{ endif ~} +%{ for route in interface.routes ~} + ip route add ${route} via `curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/${interface.number}/gateway -H "Metadata-Flavor:Google"` dev ${interface.name} +%{ endfor ~} +%{ endfor ~} bootcmd: - systemctl start node-problem-detector runcmd: - - iptables --policy FORWARD ACCEPT -%{ for interface in network_interfaces ~} -%{ if enable_health_checks ~} - - /var/run/nva/policy_based_routing.sh ${interface.name} -%{ endif ~} -%{ for route in interface.routes ~} - - ip route add ${route} via `curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/${interface.number}/gateway -H "Metadata-Flavor:Google"` dev ${interface.name} -%{ endfor ~} -%{ endfor ~} + - systemctl daemon-reload + - systemctl enable routing + - systemctl start routing + diff --git a/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh b/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh index 42ed0dcb..2e1eb152 100644 --- a/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh +++ b/modules/cloud-config-container/simple-nva/files/policy_based_routing.sh @@ -15,13 +15,18 @@ # limitations under the License. IF_NAME=$1 -IF_NUMBER=$(echo $1 | sed -e s/eth//) -IF_GW=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/gateway -H "Metadata-Flavor: Google") -IF_IP=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/ip -H "Metadata-Flavor: Google") -IF_NETMASK=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/subnetmask -H "Metadata-Flavor: Google") -IF_IP_PREFIX=$(/var/run/nva/ipprefix_by_netmask.sh $IF_NETMASK) IP_LB=$(ip r show table local | grep "$IF_NAME proto 66" | cut -f 2 -d " ") -grep -qxF "$((200 + $IF_NUMBER)) hc-$IF_NAME" /etc/iproute2/rt_tables || echo "$((200 + $IF_NUMBER)) hc-$IF_NAME" >>/etc/iproute2/rt_tables -ip route add $IF_GW src $IF_IP dev $IF_NAME table hc-$IF_NAME -ip route add default via $IF_GW dev $IF_NAME table hc-$IF_NAME -ip rule add from $IP_LB/32 table hc-$IF_NAME + +# If there's a load balancer for this IF... +if [ ! 
-z $IP_LB ] +then + IF_NUMBER=$(echo $IF_NAME | sed -e s/eth//) + IF_GW=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/gateway -H "Metadata-Flavor: Google") + IF_IP=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/ip -H "Metadata-Flavor: Google") + IF_NETMASK=$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/$IF_NUMBER/subnetmask -H "Metadata-Flavor: Google") + IF_IP_PREFIX=$(/var/run/nva/ipprefix_by_netmask.sh $IF_NETMASK) + grep -qxF "$((200 + $IF_NUMBER)) hc-$IF_NAME" /etc/iproute2/rt_tables || echo "$((200 + $IF_NUMBER)) hc-$IF_NAME" >>/etc/iproute2/rt_tables + ip route add $IF_GW src $IF_IP dev $IF_NAME table hc-$IF_NAME + ip route add default via $IF_GW dev $IF_NAME table hc-$IF_NAME + ip rule add from $IP_LB/32 table hc-$IF_NAME +fi diff --git a/modules/cloud-config-container/simple-nva/main.tf b/modules/cloud-config-container/simple-nva/main.tf index 5b9663bd..4ff0afe2 100644 --- a/modules/cloud-config-container/simple-nva/main.tf +++ b/modules/cloud-config-container/simple-nva/main.tf @@ -21,7 +21,7 @@ locals { network_interfaces = local.network_interfaces })) - files = { + files = merge({ "/var/run/nva/ipprefix_by_netmask.sh" = { content = file("${path.module}/files/ipprefix_by_netmask.sh") owner = "root" @@ -32,7 +32,13 @@ locals { owner = "root" permissions = "0744" } - } + }, { + for path, attrs in var.files : path => { + content = attrs.content, + owner = attrs.owner, + permissions = attrs.permissions + } + }) network_interfaces = [ for index, interface in var.network_interfaces : { diff --git a/modules/cloud-config-container/simple-nva/variables.tf b/modules/cloud-config-container/simple-nva/variables.tf index 9307ddac..3c2ebfcb 100644 --- a/modules/cloud-config-container/simple-nva/variables.tf +++ b/modules/cloud-config-container/simple-nva/variables.tf @@ -20,6 +20,16 @@ variable "cloud_config" { default = null } +variable "files" { + description = "Map of extra files to create on the instance, path as key. Owner and permissions will use defaults if null." + type = map(object({ + content = string + owner = string + permissions = string + })) + default = {} +} + variable "enable_health_checks" { description = "Configures routing to enable responses to health check probes." 
type = bool diff --git a/modules/cloud-config-container/simple-nva/versions.tf b/modules/cloud-config-container/simple-nva/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/simple-nva/versions.tf +++ b/modules/cloud-config-container/simple-nva/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-config-container/squid/versions.tf b/modules/cloud-config-container/squid/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-config-container/squid/versions.tf +++ b/modules/cloud-config-container/squid/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-function/versions.tf b/modules/cloud-function/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-function/versions.tf +++ b/modules/cloud-function/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-identity-group/versions.tf b/modules/cloud-identity-group/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-identity-group/versions.tf +++ b/modules/cloud-identity-group/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloud-run/versions.tf b/modules/cloud-run/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloud-run/versions.tf +++ b/modules/cloud-run/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/cloudsql-instance/versions.tf b/modules/cloudsql-instance/versions.tf index b1c8c910..286536a6 100644 --- a/modules/cloudsql-instance/versions.tf +++ b/modules/cloudsql-instance/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/compute-mig/README.md b/modules/compute-mig/README.md index 1d421527..895f0517 100644 --- a/modules/compute-mig/README.md +++ b/modules/compute-mig/README.md @@ -2,7 +2,7 @@ This module allows creating a managed instance group supporting one or more application versions via instance templates. Optionally, a health check and an autoscaler can be created, and the managed instance group can be configured to be stateful. 
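The examples in the hunks that follow move from the old `default_version` object to the refactored interface. As a consolidated sketch of the new call style (reusing the `nginx-template` reference from the surrounding examples; this is an illustrative subset of arguments drawn from the updated examples, not an exhaustive configuration):

```hcl
module "nginx-mig" {
  source            = "./fabric/modules/compute-mig"
  project_id        = "my-project"
  location          = "europe-west1-b"
  name              = "mig-test"
  target_size       = 3
  instance_template = module.nginx-template.template.self_link
  # auto-created health check, wired into auto healing via the module
  auto_healing_policies = {
    initial_delay_sec = 30
  }
  health_check_config = {
    enable_logging = true
    http = {
      port = 80
    }
  }
  # autoscaler signals now live under scaling_signals
  autoscaler_config = {
    max_replicas    = 3
    min_replicas    = 1
    cooldown_period = 30
    scaling_signals = {
      cpu_utilization = {
        target = 0.65
      }
    }
  }
}
```

Setting `target_size` together with `autoscaler_config` mirrors the updated examples below; per the variables table, `target_size` can be left null when the autoscaler drives sizing.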
-This module can be coupled with the [`compute-vm`](../compute-vm) module which can manage instance templates, and the [`net-ilb`](../net-ilb) module to assign the MIG to a backend wired to an Internal Load Balancer. The first use case is shown in the examples below. +This module can be coupled with the [`compute-vm`](../compute-vm) module which can manage instance templates, and the [`net-ilb`](../net-ilb) module to assign the MIG to a backend wired to an Internal Load Balancer. The first use case is shown in the examples below. Stateful disks can be created directly, as shown in the last example below. @@ -39,15 +39,12 @@ module "nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 2 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 2 + instance_template = module.nginx-template.template.self_link } # tftest modules=2 resources=2 ``` @@ -85,20 +82,18 @@ module "nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 3 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 3 + instance_template = module.nginx-template.template.self_link versions = { canary = { instance_template = module.nginx-template.template.self_link - target_type = "fixed" - target_size = 1 + target_size = { + fixed = 1 + } } } } @@ -138,24 +133,20 @@ module "nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 3 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 3 + instance_template = module.nginx-template.template.self_link auto_healing_policies = { - health_check = module.nginx-mig.health_check.self_link initial_delay_sec = 30 } health_check_config = { - type = "http" - check = { port = 80 } - config = {} - logging = true + enable_logging = true + http = { + port = 80 + } } } # tftest modules=2 resources=3 @@ -194,22 +185,21 @@ module "nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 3 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 3 + instance_template = module.nginx-template.template.self_link autoscaler_config = { - max_replicas = 3 - min_replicas = 1 - cooldown_period = 30 - cpu_utilization_target = 0.65 - load_balancing_utilization_target = null - metric = null + max_replicas = 3 + min_replicas = 1 + cooldown_period = 30 + scaling_signals = { + cpu_utilization = { + target = 0.65 + } + } } } # tftest modules=2 resources=3 @@ -246,23 +236,19 @@ module 
"nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 3 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 3 + instance_template = module.nginx-template.template.self_link update_policy = { - type = "PROACTIVE" minimal_action = "REPLACE" + type = "PROACTIVE" min_ready_sec = 30 - max_surge_type = "fixed" - max_surge = 1 - max_unavailable_type = null - max_unavailable = null + max_surge = { + fixed = 1 + } } } # tftest modules=2 resources=2 @@ -270,7 +256,7 @@ module "nginx-mig" { ### Stateful MIGs - MIG Config -Stateful MIGs have some limitations documented [here](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-migs#limitations). Enforcement of these requirements is the responsibility of users of this module. +Stateful MIGs have some limitations documented [here](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-migs#limitations). Enforcement of these requirements is the responsibility of users of this module. You can configure a disk defined in the instance template to be stateful for all instances in the MIG by configuring in the MIG's stateful policy, using the `stateful_disk_mig` variable. Alternatively, you can also configure stateful persistent disks individually per instance of the MIG by setting the `stateful_disk_instance` variable. A discussion on these scenarios can be found in the [docs](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). @@ -278,7 +264,6 @@ An example using only the configuration at the MIG level can be seen below. Note that when referencing the stateful disk, you use `device_name` and not `disk_name`. - ```hcl module "cos-nginx" { source = "./fabric/modules/cloud-config-container/nginx" @@ -319,40 +304,33 @@ module "nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 3 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 3 + instance_template = module.nginx-template.template.self_link autoscaler_config = { - max_replicas = 3 - min_replicas = 1 - cooldown_period = 30 - cpu_utilization_target = 0.65 - load_balancing_utilization_target = null - metric = null - } - stateful_config = { - per_instance_config = {}, - mig_config = { - stateful_disks = { - repd-1 = { - delete_rule = "NEVER" - } + max_replicas = 3 + min_replicas = 1 + cooldown_period = 30 + scaling_signals = { + cpu_utilization = { + target = 0.65 } } } + stateful_disks = { + repd-1 = null + } } # tftest modules=2 resources=3 ``` ### Stateful MIGs - Instance Config -Here is an example defining the stateful config at the instance level. + +Here is an example defining the stateful config at the instance level. Note that you will need to know the instance name in order to use this configuration. 
@@ -396,46 +374,36 @@ module "nginx-template" { } module "nginx-mig" { - source = "./fabric/modules/compute-mig" - project_id = "my-project" - location = "europe-west1-b" - name = "mig-test" - target_size = 3 - default_version = { - instance_template = module.nginx-template.template.self_link - name = "default" - } + source = "./fabric/modules/compute-mig" + project_id = "my-project" + location = "europe-west1-b" + name = "mig-test" + target_size = 3 + instance_template = module.nginx-template.template.self_link autoscaler_config = { - max_replicas = 3 - min_replicas = 1 - cooldown_period = 30 - cpu_utilization_target = 0.65 - load_balancing_utilization_target = null - metric = null + max_replicas = 3 + min_replicas = 1 + cooldown_period = 30 + scaling_signals = { + cpu_utilization = { + target = 0.65 + } + } } stateful_config = { - per_instance_config = { - # note that this needs to be the name of an existing instance within the Managed Instance Group - instance-1 = { - stateful_disks = { + # name needs to match a MIG instance name + instance-1 = { + minimal_action = "NONE", + most_disruptive_allowed_action = "REPLACE" + preserved_state = { + disks = { persistent-disk-1 = { source = "test-disk", - mode = "READ_ONLY", - delete_rule= "NEVER", - }, - }, + } + } metadata = { foo = "bar" - }, - update_config = { - minimal_action = "NONE", - most_disruptive_allowed_action = "REPLACE", - remove_instance_state_on_destroy = false, - }, - }, - }, - mig_config = { - stateful_disks = { + } } } } @@ -449,21 +417,25 @@ module "nginx-mig" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [default_version](variables.tf#L45) | Default application version template. Additional versions can be specified via the `versions` variable. | object({…}) | ✓ | | -| [location](variables.tf#L64) | Compute zone, or region if `regional` is set to true. | string | ✓ | | -| [name](variables.tf#L68) | Managed group name. | string | ✓ | | -| [project_id](variables.tf#L79) | Project id. | string | ✓ | | -| [auto_healing_policies](variables.tf#L17) | Auto-healing policies for this group. | object({…}) | | null | -| [autoscaler_config](variables.tf#L26) | Optional autoscaler configuration. Only one of 'cpu_utilization_target' 'load_balancing_utilization_target' or 'metric' can be not null. | object({…}) | | null | -| [health_check_config](variables.tf#L53) | Optional auto-created health check configuration, use the output self-link to set it in the auto healing policy. Refer to examples for usage. | object({…}) | | null | -| [named_ports](variables.tf#L73) | Named ports. | map(number) | | null | -| [regional](variables.tf#L84) | Use regional instance group. When set, `location` should be set to the region. | bool | | false | -| [stateful_config](variables.tf#L90) | Stateful configuration can be done by individual instances or for all instances in the MIG. They key in per_instance_config is the name of the specific instance. The key of the stateful_disks is the 'device_name' field of the resource. Please note that device_name is defined at the OS mount level, unlike the disk name. | object({…}) | | null | -| [target_pools](variables.tf#L121) | Optional list of URLs for target pools to which new instances in the group are added. | list(string) | | [] | -| [target_size](variables.tf#L127) | Group target size, leave null when using an autoscaler. | number | | null | -| [update_policy](variables.tf#L133) | Update policy. 
Type can be 'OPPORTUNISTIC' or 'PROACTIVE', action 'REPLACE' or 'restart', surge type 'fixed' or 'percent'. | object({…}) | | null | -| [versions](variables.tf#L148) | Additional application versions, target_type is either 'fixed' or 'percent'. | map(object({…})) | | null | -| [wait_for_instances](variables.tf#L158) | Wait for all instances to be created/updated before returning. | bool | | null | +| [instance_template](variables.tf#L174) | Instance template for the default version. | string | ✓ | | +| [location](variables.tf#L179) | Compute zone or region. | string | ✓ | | +| [name](variables.tf#L184) | Managed group name. | string | ✓ | | +| [project_id](variables.tf#L195) | Project id. | string | ✓ | | +| [all_instances_config](variables.tf#L17) | Metadata and labels set to all instances in the group. | object({…}) | | null | +| [auto_healing_policies](variables.tf#L26) | Auto-healing policies for this group. | object({…}) | | null | +| [autoscaler_config](variables.tf#L35) | Optional autoscaler configuration. | object({…}) | | null | +| [default_version_name](variables.tf#L83) | Name used for the default version. | string | | "default" | +| [description](variables.tf#L89) | Optional description used for all resources managed by this module. | string | | "Terraform managed." | +| [distribution_policy](variables.tf#L95) | DIstribution policy for regional MIG. | object({…}) | | null | +| [health_check_config](variables.tf#L104) | Optional auto-created health check configuration, use the output self-link to set it in the auto healing policy. Refer to examples for usage. | object({…}) | | null | +| [named_ports](variables.tf#L189) | Named ports. | map(number) | | null | +| [stateful_config](variables.tf#L207) | Stateful configuration for individual instances. | map(object({…})) | | {} | +| [stateful_disks](variables.tf#L200) | Stateful disk configuration applied at the MIG level to all instances, in device name => on permanent instance delete rule as boolean. | map(bool) | | {} | +| [target_pools](variables.tf#L226) | Optional list of URLs for target pools to which new instances in the group are added. | list(string) | | [] | +| [target_size](variables.tf#L232) | Group target size, leave null when using an autoscaler. | number | | null | +| [update_policy](variables.tf#L238) | Update policy. Minimal action and type are required. | object({…}) | | null | +| [versions](variables.tf#L259) | Additional application versions, target_size is optional. | map(object({…})) | | {} | +| [wait_for_instances](variables.tf#L272) | Wait for all instances to be created/updated before returning. | object({…}) | | null | ## Outputs diff --git a/modules/compute-mig/autoscaler.tf b/modules/compute-mig/autoscaler.tf new file mode 100644 index 00000000..b8bd0acc --- /dev/null +++ b/modules/compute-mig/autoscaler.tf @@ -0,0 +1,229 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# tfdoc:file:description Autoscaler resource. 
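+# Example shape of var.autoscaler_config as consumed by the resources in this
+# file (a sketch inferred from the dynamic blocks below, not a full schema).
+# Scale-in/scale-down limits live under scaling_control.in / scaling_control.down;
+# supported signals are cpu_utilization, load_balancing_utilization, metrics and
+# schedules. Values here are illustrative placeholders.
+#
+# autoscaler_config = {
+#   max_replicas    = 3
+#   min_replicas    = 1
+#   cooldown_period = 30
+#   scaling_signals = {
+#     cpu_utilization = {
+#       target                = 0.65
+#       optimize_availability = false
+#     }
+#     schedules = [{
+#       name                  = "business-hours"
+#       cron_schedule         = "0 8 * * 1-5"
+#       duration_sec          = 3600
+#       min_required_replicas = 2
+#       description           = null
+#       disabled              = false
+#       timezone              = "Europe/Rome"
+#     }]
+#   }
+# }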
+ +locals { + as_enabled = true + as_scaling = try(var.autoscaler_config.scaling_control, null) + as_signals = try(var.autoscaler_config.scaling_signals, null) +} + +resource "google_compute_autoscaler" "default" { + provider = google-beta + count = local.is_regional || var.autoscaler_config == null ? 0 : 1 + project = var.project_id + name = var.name + zone = var.location + description = var.description + target = google_compute_instance_group_manager.default.0.id + + autoscaling_policy { + max_replicas = var.autoscaler_config.max_replicas + min_replicas = var.autoscaler_config.min_replicas + cooldown_period = var.autoscaler_config.cooldown_period + + dynamic "scale_down_control" { + for_each = local.as_scaling.down == null ? [] : [""] + content { + time_window_sec = local.as_scaling.down.time_window_sec + dynamic "max_scaled_down_replicas" { + for_each = ( + local.as_scaling.down.max_replicas_fixed == null && + local.as_scaling.down.max_replicas_percent == null + ? [] + : [""] + ) + content { + fixed = local.as_scaling.down.max_replicas_fixed + percent = local.as_scaling.down.max_replicas_percent + } + } + } + } + + dynamic "scale_in_control" { + for_each = local.as_scaling.in == null ? [] : [""] + content { + time_window_sec = local.as_scaling.in.time_window_sec + dynamic "max_scaled_in_replicas" { + for_each = ( + local.as_scaling.in.max_replicas_fixed == null && + local.as_scaling.in.max_replicas_percent == null + ? [] + : [""] + ) + content { + fixed = local.as_scaling.in.max_replicas_fixed + percent = local.as_scaling.in.max_replicas_percent + } + } + } + } + + dynamic "cpu_utilization" { + for_each = local.as_signals.cpu_utilization == null ? [] : [""] + content { + target = local.as_signals.cpu_utilization.target + predictive_method = ( + local.as_signals.cpu_utilization.optimize_availability == true + ? "OPTIMIZE_AVAILABILITY" + : null + ) + } + } + + dynamic "load_balancing_utilization" { + for_each = local.as_signals.load_balancing_utilization == null ? [] : [""] + content { + target = local.as_signals.load_balancing_utilization.target + } + } + + dynamic "metric" { + for_each = toset( + local.as_signals.metrics == null ? [] : local.as_signals.metrics + ) + content { + name = metric.value.name + type = metric.value.type + target = metric.value.target_value + single_instance_assignment = metric.value.single_instance_assignment + filter = metric.value.time_series_filter + } + } + + dynamic "scaling_schedules" { + for_each = toset( + local.as_signals.schedules == null ? [] : local.as_signals.schedules + ) + iterator = schedule + content { + duration_sec = schedule.value.duration_sec + min_required_replicas = schedule.value.min_required_replicas + name = schedule.value.name + schedule = schedule.value.cron_schedule + description = schedule.value.description + disabled = schedule.value.disabled + time_zone = schedule.value.timezone + } + } + + } +} + +resource "google_compute_region_autoscaler" "default" { + provider = google-beta + count = local.is_regional && var.autoscaler_config != null ? 1 : 0 + project = var.project_id + name = var.name + region = var.location + description = var.description + target = google_compute_region_instance_group_manager.default.0.id + + autoscaling_policy { + max_replicas = var.autoscaler_config.max_replicas + min_replicas = var.autoscaler_config.min_replicas + cooldown_period = var.autoscaler_config.cooldown_period + + dynamic "scale_down_control" { + for_each = local.as_scaling.down == null ? 
[] : [""] + content { + time_window_sec = local.as_scaling.down.time_window_sec + dynamic "max_scaled_down_replicas" { + for_each = ( + local.as_scaling.down.max_replicas_fixed == null && + local.as_scaling.down.max_replicas_percent == null + ? [] + : [""] + ) + content { + fixed = local.as_scaling.down.max_replicas_fixed + percent = local.as_scaling.down.max_replicas_percent + } + } + } + } + + dynamic "scale_in_control" { + for_each = local.as_scaling.in == null ? [] : [""] + content { + time_window_sec = local.as_scaling.in.time_window_sec + dynamic "max_scaled_in_replicas" { + for_each = ( + local.as_scaling.in.max_replicas_fixed == null && + local.as_scaling.in.max_replicas_percent == null + ? [] + : [""] + ) + content { + fixed = local.as_scaling.in.max_replicas_fixed + percent = local.as_scaling.in.max_replicas_percent + } + } + } + } + + dynamic "cpu_utilization" { + for_each = local.as_signals.cpu_utilization == null ? [] : [""] + content { + target = local.as_signals.cpu_utilization.target + predictive_method = ( + local.as_signals.cpu_utilization.optimize_availability == true + ? "OPTIMIZE_AVAILABILITY" + : null + ) + } + } + + dynamic "load_balancing_utilization" { + for_each = local.as_signals.load_balancing_utilization == null ? [] : [""] + content { + target = local.as_signals.load_balancing_utilization.target + } + } + + dynamic "metric" { + for_each = toset( + local.as_signals.metrics == null ? [] : local.as_signals.metrics + ) + content { + name = metric.value.name + type = metric.value.type + target = metric.value.target_value + single_instance_assignment = metric.value.single_instance_assignment + filter = metric.value.time_series_filter + } + } + + dynamic "scaling_schedules" { + for_each = toset( + local.as_signals.schedules == null ? [] : local.as_signals.schedules + ) + iterator = schedule + content { + duration_sec = schedule.value.duration_sec + min_required_replicas = schedule.value.min_required_replicas + name = schedule.value.name + schedule = schedule.cron_schedule + description = schedule.value.description + disabled = schedule.value.disabled + time_zone = schedule.value.timezone + } + } + + } +} diff --git a/modules/compute-mig/health-check.tf b/modules/compute-mig/health-check.tf new file mode 100644 index 00000000..4a4ed40d --- /dev/null +++ b/modules/compute-mig/health-check.tf @@ -0,0 +1,119 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# tfdoc:file:description Health check resource. + +locals { + hc = var.health_check_config + hc_grpc = try(local.hc.grpc, null) != null + hc_http = try(local.hc.http, null) != null + hc_http2 = try(local.hc.http2, null) != null + hc_https = try(local.hc.https, null) != null + hc_ssl = try(local.hc.ssl, null) != null + hc_tcp = try(local.hc.tcp, null) != null +} + +resource "google_compute_health_check" "default" { + provider = google-beta + count = local.hc != null ? 
1 : 0 + project = var.project_id + name = var.name + description = local.hc.description + check_interval_sec = local.hc.check_interval_sec + healthy_threshold = local.hc.healthy_threshold + timeout_sec = local.hc.timeout_sec + unhealthy_threshold = local.hc.unhealthy_threshold + + dynamic "grpc_health_check" { + for_each = local.hc_grpc ? [""] : [] + content { + port = local.hc.grpc.port + port_name = local.hc.grpc.port_name + port_specification = local.hc.grpc.port_specification + grpc_service_name = local.hc.grpc.service_name + } + } + + dynamic "http_health_check" { + for_each = local.hc_http ? [""] : [] + content { + host = local.hc.http.host + port = local.hc.http.port + port_name = local.hc.http.port_name + port_specification = local.hc.http.port_specification + proxy_header = local.hc.http.proxy_header + request_path = local.hc.http.request_path + response = local.hc.http.response + } + } + + dynamic "http2_health_check" { + for_each = local.hc_http2 ? [""] : [] + content { + host = local.hc.http.host + port = local.hc.http.port + port_name = local.hc.http.port_name + port_specification = local.hc.http.port_specification + proxy_header = local.hc.http.proxy_header + request_path = local.hc.http.request_path + response = local.hc.http.response + } + } + + dynamic "https_health_check" { + for_each = local.hc_https ? [""] : [] + content { + host = local.hc.http.host + port = local.hc.http.port + port_name = local.hc.http.port_name + port_specification = local.hc.http.port_specification + proxy_header = local.hc.http.proxy_header + request_path = local.hc.http.request_path + response = local.hc.http.response + } + } + + dynamic "ssl_health_check" { + for_each = local.hc_ssl ? [""] : [] + content { + port = local.hc.tcp.port + port_name = local.hc.tcp.port_name + port_specification = local.hc.tcp.port_specification + proxy_header = local.hc.tcp.proxy_header + request = local.hc.tcp.request + response = local.hc.tcp.response + } + } + + dynamic "tcp_health_check" { + for_each = local.hc_tcp ? [""] : [] + content { + port = local.hc.tcp.port + port_name = local.hc.tcp.port_name + port_specification = local.hc.tcp.port_specification + proxy_header = local.hc.tcp.proxy_header + request = local.hc.tcp.request + response = local.hc.tcp.response + } + } + + dynamic "log_config" { + for_each = try(local.hc.enable_logging, null) == true ? [""] : [] + content { + enable = true + } + } +} diff --git a/modules/compute-mig/main.tf b/modules/compute-mig/main.tf index b5a71fac..35f255a6 100644 --- a/modules/compute-mig/main.tf +++ b/modules/compute-mig/main.tf @@ -14,105 +14,50 @@ * limitations under the License. */ -resource "google_compute_autoscaler" "default" { - provider = google-beta - count = var.regional || var.autoscaler_config == null ? 0 : 1 - project = var.project_id - name = var.name - description = "Terraform managed." - zone = var.location - target = google_compute_instance_group_manager.default.0.id - - autoscaling_policy { - max_replicas = var.autoscaler_config.max_replicas - min_replicas = var.autoscaler_config.min_replicas - cooldown_period = var.autoscaler_config.cooldown_period - - dynamic "cpu_utilization" { - for_each = ( - var.autoscaler_config.cpu_utilization_target == null ? [] : [""] - ) - content { - target = var.autoscaler_config.cpu_utilization_target - } - } - - dynamic "load_balancing_utilization" { - for_each = ( - var.autoscaler_config.load_balancing_utilization_target == null ? 
[] : [""] - ) - content { - target = var.autoscaler_config.load_balancing_utilization_target - } - } - - dynamic "metric" { - for_each = ( - var.autoscaler_config.metric == null - ? [] - : [var.autoscaler_config.metric] - ) - iterator = config - content { - name = config.value.name - single_instance_assignment = config.value.single_instance_assignment - target = config.value.target - type = config.value.type - filter = config.value.filter - } - } - } +locals { + health_check = ( + try(var.auto_healing_policies.health_check, null) == null + ? try(google_compute_health_check.default.0.self_link, null) + : try(var.auto_healing_policies.health_check, null) + ) + instance_group_manager = ( + local.is_regional ? + google_compute_region_instance_group_manager.default : + google_compute_instance_group_manager.default + ) + is_regional = length(split("-", var.location)) == 2 } - resource "google_compute_instance_group_manager" "default" { - provider = google-beta - count = var.regional ? 0 : 1 - project = var.project_id - zone = var.location - name = var.name - base_instance_name = var.name - description = "Terraform-managed." - target_size = var.target_size - target_pools = var.target_pools - wait_for_instances = var.wait_for_instances + provider = google-beta + count = local.is_regional ? 0 : 1 + project = var.project_id + zone = var.location + name = var.name + base_instance_name = var.name + description = var.description + target_size = var.target_size + target_pools = var.target_pools + wait_for_instances = try(var.wait_for_instances.enabled, null) + wait_for_instances_status = try(var.wait_for_instances.status, null) + + dynamic "all_instances_config" { + for_each = var.all_instances_config == null ? [] : [""] + content { + labels = try(var.all_instances_config.labels, null) + metadata = try(var.all_instances_config.metadata, null) + } + } + dynamic "auto_healing_policies" { - for_each = var.auto_healing_policies == null ? [] : [var.auto_healing_policies] + for_each = var.auto_healing_policies == null ? [] : [""] iterator = config content { - health_check = config.value.health_check - initial_delay_sec = config.value.initial_delay_sec - } - } - dynamic "stateful_disk" { - for_each = try(var.stateful_config.mig_config.stateful_disks, {}) - iterator = config - content { - device_name = config.key - delete_rule = config.value.delete_rule - } - } - dynamic "update_policy" { - for_each = var.update_policy == null ? [] : [var.update_policy] - iterator = config - content { - type = config.value.type - minimal_action = config.value.minimal_action - min_ready_sec = config.value.min_ready_sec - max_surge_fixed = ( - config.value.max_surge_type == "fixed" ? config.value.max_surge : null - ) - max_surge_percent = ( - config.value.max_surge_type == "percent" ? config.value.max_surge : null - ) - max_unavailable_fixed = ( - config.value.max_unavailable_type == "fixed" ? config.value.max_unavailable : null - ) - max_unavailable_percent = ( - config.value.max_unavailable_type == "percent" ? config.value.max_unavailable : null - ) + health_check = local.health_check + initial_delay_sec = var.auto_healing_policies.initial_delay_sec } } + dynamic "named_port" { for_each = var.named_ports == null ? 
{} : var.named_ports iterator = config @@ -121,167 +66,88 @@ resource "google_compute_instance_group_manager" "default" { port = config.value } } - version { - instance_template = var.default_version.instance_template - name = var.default_version.name + + dynamic "stateful_disk" { + for_each = var.stateful_disks + content { + device_name = stateful_disk.key + delete_rule = stateful_disk.value + } } + + dynamic "update_policy" { + for_each = var.update_policy == null ? [] : [var.update_policy] + iterator = p + content { + minimal_action = p.value.minimal_action + type = p.value.type + max_surge_fixed = try(p.value.max_surge.fixed, null) + max_surge_percent = try(p.value.max_surge.percent, null) + max_unavailable_fixed = try(p.value.max_unavailable.fixed, null) + max_unavailable_percent = try(p.value.max_unavailable.percent, null) + min_ready_sec = p.value.min_ready_sec + most_disruptive_allowed_action = p.value.most_disruptive_action + replacement_method = p.value.replacement_method + } + } + + version { + instance_template = var.instance_template + name = var.default_version_name + } + dynamic "version" { - for_each = var.versions == null ? {} : var.versions - iterator = version + for_each = var.versions content { name = version.key instance_template = version.value.instance_template - target_size { - fixed = ( - version.value.target_type == "fixed" ? version.value.target_size : null - ) - percent = ( - version.value.target_type == "percent" ? version.value.target_size : null - ) + dynamic "target_size" { + for_each = version.value.target_size == null ? [] : [""] + content { + fixed = version.value.target_size.fixed + percent = version.value.target_size.percent + } } } } } -locals { - instance_group_manager = ( - var.regional ? - google_compute_region_instance_group_manager.default : - google_compute_instance_group_manager.default - ) -} - -resource "google_compute_per_instance_config" "default" { - for_each = try(var.stateful_config.per_instance_config, {}) - #for_each = var.stateful_config && var.stateful_config.per_instance_config == null ? {} : length(var.stateful_config.per_instance_config) - zone = var.location - # terraform error, solved with locals - #instance_group_manager = var.regional ? google_compute_region_instance_group_manager.default : google_compute_instance_group_manager.default - instance_group_manager = local.instance_group_manager[0].id - name = each.key - project = var.project_id - minimal_action = try(each.value.update_config.minimal_action, null) - most_disruptive_allowed_action = try(each.value.update_config.most_disruptive_allowed_action, null) - remove_instance_state_on_destroy = try(each.value.update_config.remove_instance_state_on_destroy, null) - preserved_state { - - metadata = each.value.metadata - - dynamic "disk" { - for_each = try(each.value.stateful_disks, {}) - #for_each = var.stateful_config.mig_config.stateful_disks == null ? {} : var.stateful_config.mig_config.stateful_disks - iterator = config - content { - device_name = config.key - source = config.value.source - mode = config.value.mode - delete_rule = config.value.delete_rule - } - } - } -} - -resource "google_compute_region_autoscaler" "default" { - provider = google-beta - count = var.regional && var.autoscaler_config != null ? 1 : 0 - project = var.project_id - name = var.name - description = "Terraform managed." 
- region = var.location - target = google_compute_region_instance_group_manager.default.0.id - - autoscaling_policy { - max_replicas = var.autoscaler_config.max_replicas - min_replicas = var.autoscaler_config.min_replicas - cooldown_period = var.autoscaler_config.cooldown_period - - dynamic "cpu_utilization" { - for_each = ( - var.autoscaler_config.cpu_utilization_target == null ? [] : [""] - ) - content { - target = var.autoscaler_config.cpu_utilization_target - } - } - - dynamic "load_balancing_utilization" { - for_each = ( - var.autoscaler_config.load_balancing_utilization_target == null ? [] : [""] - ) - content { - target = var.autoscaler_config.load_balancing_utilization_target - } - } - - dynamic "metric" { - for_each = ( - var.autoscaler_config.metric == null - ? [] - : [var.autoscaler_config.metric] - ) - iterator = config - content { - name = config.value.name - single_instance_assignment = config.value.single_instance_assignment - target = config.value.target - type = config.value.type - filter = config.value.filter - } - } - } -} - - resource "google_compute_region_instance_group_manager" "default" { provider = google-beta - count = var.regional ? 1 : 0 + count = local.is_regional ? 1 : 0 project = var.project_id region = var.location name = var.name base_instance_name = var.name - description = "Terraform-managed." - target_size = var.target_size - target_pools = var.target_pools - wait_for_instances = var.wait_for_instances - dynamic "auto_healing_policies" { - for_each = var.auto_healing_policies == null ? [] : [var.auto_healing_policies] - iterator = config + description = var.description + distribution_policy_target_shape = try( + var.distribution_policy.target_shape, null + ) + distribution_policy_zones = try( + var.distribution_policy.zones, null + ) + target_size = var.target_size + target_pools = var.target_pools + wait_for_instances = try(var.wait_for_instances.enabled, null) + wait_for_instances_status = try(var.wait_for_instances.status, null) + + dynamic "all_instances_config" { + for_each = var.all_instances_config == null ? [] : [""] content { - health_check = config.value.health_check - initial_delay_sec = config.value.initial_delay_sec - } - } - dynamic "stateful_disk" { - for_each = try(var.stateful_config.mig_config.stateful_disks, {}) - iterator = config - content { - device_name = config.key - delete_rule = config.value.delete_rule + labels = try(var.all_instances_config.labels, null) + metadata = try(var.all_instances_config.metadata, null) } } - dynamic "update_policy" { - for_each = var.update_policy == null ? [] : [var.update_policy] + dynamic "auto_healing_policies" { + for_each = var.auto_healing_policies == null ? [] : [""] iterator = config content { - instance_redistribution_type = config.value.instance_redistribution_type - type = config.value.type - minimal_action = config.value.minimal_action - min_ready_sec = config.value.min_ready_sec - max_surge_fixed = ( - config.value.max_surge_type == "fixed" ? config.value.max_surge : null - ) - max_surge_percent = ( - config.value.max_surge_type == "percent" ? config.value.max_surge : null - ) - max_unavailable_fixed = ( - config.value.max_unavailable_type == "fixed" ? config.value.max_unavailable : null - ) - max_unavailable_percent = ( - config.value.max_unavailable_type == "percent" ? config.value.max_unavailable : null - ) + health_check = local.health_check + initial_delay_sec = var.auto_healing_policies.initial_delay_sec } } + dynamic "named_port" { for_each = var.named_ports == null ? 
{} : var.named_ports iterator = config @@ -290,172 +156,49 @@ resource "google_compute_region_instance_group_manager" "default" { port = config.value } } - version { - instance_template = var.default_version.instance_template - name = var.default_version.name + + dynamic "stateful_disk" { + for_each = var.stateful_disks + content { + device_name = stateful_disk.key + delete_rule = stateful_disk.value + } } + + dynamic "update_policy" { + for_each = var.update_policy == null ? [] : [var.update_policy] + iterator = p + content { + minimal_action = p.value.minimal_action + type = p.value.type + instance_redistribution_type = p.value.regional_redistribution_type + max_surge_fixed = try(p.value.max_surge.fixed, null) + max_surge_percent = try(p.value.max_surge.percent, null) + max_unavailable_fixed = try(p.value.max_unavailable.fixed, null) + max_unavailable_percent = try(p.value.max_unavailable.percent, null) + min_ready_sec = p.value.min_ready_sec + most_disruptive_allowed_action = p.value.most_disruptive_action + replacement_method = p.value.replacement_method + } + } + + version { + instance_template = var.instance_template + name = var.default_version_name + } + dynamic "version" { - for_each = var.versions == null ? {} : var.versions - iterator = version + for_each = var.versions content { name = version.key instance_template = version.value.instance_template - target_size { - fixed = ( - version.value.target_type == "fixed" ? version.value.target_size : null - ) - percent = ( - version.value.target_type == "percent" ? version.value.target_size : null - ) + dynamic "target_size" { + for_each = version.value.target_size == null ? [] : [""] + content { + fixed = version.value.target_size.fixed + percent = version.value.target_size.percent + } } } } } - -resource "google_compute_health_check" "http" { - provider = google-beta - count = try(var.health_check_config.type, null) == "http" ? 1 : 0 - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - http_health_check { - host = try(var.health_check_config.check.host, null) - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request_path = try(var.health_check_config.check.request_path, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} - -resource "google_compute_health_check" "https" { - provider = google-beta - count = try(var.health_check_config.type, null) == "https" ? 1 : 0 - project = var.project_id - name = var.name - description = "Terraform managed." 
- - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - https_health_check { - host = try(var.health_check_config.check.host, null) - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request_path = try(var.health_check_config.check.request_path, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} - -resource "google_compute_health_check" "tcp" { - provider = google-beta - count = try(var.health_check_config.type, null) == "tcp" ? 1 : 0 - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - tcp_health_check { - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request = try(var.health_check_config.check.request, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} - -resource "google_compute_health_check" "ssl" { - provider = google-beta - count = try(var.health_check_config.type, null) == "ssl" ? 1 : 0 - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - ssl_health_check { - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request = try(var.health_check_config.check.request, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} - -resource "google_compute_health_check" "http2" { - provider = google-beta - count = try(var.health_check_config.type, null) == "http2" ? 1 : 0 - project = var.project_id - name = var.name - description = "Terraform managed." 
- - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - http2_health_check { - host = try(var.health_check_config.check.host, null) - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request_path = try(var.health_check_config.check.request_path, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} diff --git a/modules/compute-mig/outputs.tf b/modules/compute-mig/outputs.tf index 93de9223..41b20c1f 100644 --- a/modules/compute-mig/outputs.tf +++ b/modules/compute-mig/outputs.tf @@ -37,13 +37,6 @@ output "health_check" { value = ( var.health_check_config == null ? null - : try( - google_compute_health_check.http.0, - google_compute_health_check.https.0, - google_compute_health_check.tcp.0, - google_compute_health_check.ssl.0, - google_compute_health_check.http2.0, - {} - ) + : google_compute_health_check.default.0 ) } diff --git a/modules/compute-mig/stateful-config.tf b/modules/compute-mig/stateful-config.tf new file mode 100644 index 00000000..1e9e056e --- /dev/null +++ b/modules/compute-mig/stateful-config.tf @@ -0,0 +1,91 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# tfdoc:file:description Instance-level stateful configuration resources. + +resource "google_compute_per_instance_config" "default" { + for_each = local.is_regional ? {} : var.stateful_config + project = var.project_id + zone = var.location + name = each.key + instance_group_manager = try( + google_compute_instance_group_manager.default.0.id, null + ) + minimal_action = each.value.minimal_action + most_disruptive_allowed_action = each.value.most_disruptive_action + remove_instance_state_on_destroy = each.value.remove_state_on_destroy + + dynamic "preserved_state" { + for_each = each.value.preserved_state == null ? [] : [""] + content { + metadata = each.value.preserved_state.metadata + dynamic "disk" { + for_each = ( + each.value.preserved_state.disks == null + ? {} + : each.value.preserved_state.disks + ) + content { + device_name = disk.key + source = disk.value.source + delete_rule = ( + disk.value.delete_on_instance_deletion == true + ? "ON_PERMANENT_INSTANCE_DELETION" + : "NEVER" + ) + mode = disk.value.read_only == true ? "READ_ONLY" : "READ_WRITE" + } + } + } + } +} + +resource "google_compute_region_per_instance_config" "default" { + for_each = local.is_regional ? 
var.stateful_config : {} + project = var.project_id + region = var.location + name = each.key + region_instance_group_manager = try( + google_compute_region_instance_group_manager.default.0.id, null + ) + minimal_action = each.value.minimal_action + most_disruptive_allowed_action = each.value.most_disruptive_action + remove_instance_state_on_destroy = each.value.remove_state_on_destroy + + dynamic "preserved_state" { + for_each = each.value.preserved_state == null ? [] : [""] + content { + metadata = each.value.preserved_state.metadata + dynamic "disk" { + for_each = ( + each.value.preserved_state.disks == null + ? {} + : each.value.preserved_state.disks + ) + content { + device_name = disk.key + source = disk.value.source + delete_rule = ( + disk.value.delete_on_instance_deletion == true + ? "ON_PERMANENT_INSTANCE_DELETION" + : "NEVER" + ) + mode = disk.value.read_only == true ? "READ_ONLY" : "READ_WRITE" + } + } + } + } +} diff --git a/modules/compute-mig/variables.tf b/modules/compute-mig/variables.tf index 76f4fb21..056bd198 100644 --- a/modules/compute-mig/variables.tf +++ b/modules/compute-mig/variables.tf @@ -14,57 +14,173 @@ * limitations under the License. */ +variable "all_instances_config" { + description = "Metadata and labels set to all instances in the group." + type = object({ + labels = optional(map(string)) + metadata = optional(map(string)) + }) + default = null +} + variable "auto_healing_policies" { description = "Auto-healing policies for this group." type = object({ - health_check = string + health_check = optional(string) initial_delay_sec = number }) default = null } variable "autoscaler_config" { - description = "Optional autoscaler configuration. Only one of 'cpu_utilization_target' 'load_balancing_utilization_target' or 'metric' can be not null." + description = "Optional autoscaler configuration." type = object({ - max_replicas = number - min_replicas = number - cooldown_period = number - cpu_utilization_target = number - load_balancing_utilization_target = number - metric = object({ - name = string - single_instance_assignment = number - target = number - type = string # GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE - filter = string - }) + max_replicas = number + min_replicas = number + cooldown_period = optional(number) + mode = optional(string) # OFF, ONLY_UP, ON + scaling_control = optional(object({ + down = optional(object({ + max_replicas_fixed = optional(number) + max_replicas_percent = optional(number) + time_window_sec = optional(number) + })) + in = optional(object({ + max_replicas_fixed = optional(number) + max_replicas_percent = optional(number) + time_window_sec = optional(number) + })) + }), {}) + scaling_signals = optional(object({ + cpu_utilization = optional(object({ + target = number + optimize_availability = optional(bool) + })) + load_balancing_utilization = optional(object({ + target = number + })) + metrics = optional(list(object({ + name = string + type = string # GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE + target_value = number + single_instance_assignment = optional(number) + time_series_filter = optional(string) + }))) + schedules = optional(list(object({ + duration_sec = number + name = string + min_required_replicas = number + cron_schedule = string + description = optional(string) + timezone = optional(string) + disabled = optional(bool) + }))) + }), {}) }) default = null } -variable "default_version" { - description = "Default application version template. Additional versions can be specified via the `versions` variable."
+variable "default_version_name" { + description = "Name used for the default version." + type = string + default = "default" +} + +variable "description" { + description = "Optional description used for all resources managed by this module." + type = string + default = "Terraform managed." +} + +variable "distribution_policy" { + description = "Distribution policy for regional MIG." + type = object({ - instance_template = string - name = string + target_shape = optional(string) + zones = optional(list(string)) }) + default = null } variable "health_check_config" { description = "Optional auto-created health check configuration, use the output self-link to set it in the auto healing policy. Refer to examples for usage." type = object({ - type = string # http https tcp ssl http2 - check = map(any) # actual health check block attributes - config = map(number) # interval, thresholds, timeout - logging = bool + check_interval_sec = optional(number) + description = optional(string, "Terraform managed.") + enable_logging = optional(bool, false) + healthy_threshold = optional(number) + timeout_sec = optional(number) + unhealthy_threshold = optional(number) + grpc = optional(object({ + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + service_name = optional(string) + })) + http = optional(object({ + host = optional(string) + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request_path = optional(string) + response = optional(string) + })) + http2 = optional(object({ + host = optional(string) + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request_path = optional(string) + response = optional(string) + })) + https = optional(object({ + host = optional(string) + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request_path = optional(string) + response = optional(string) + })) + tcp = optional(object({ + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request = optional(string) + response = optional(string) + })) + ssl = optional(object({ + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request = optional(string) + response = optional(string) + })) }) default = null + validation { + condition = ( + (try(var.health_check_config.grpc, null) == null ? 0 : 1) + + (try(var.health_check_config.http, null) == null ? 0 : 1) + + (try(var.health_check_config.tcp, null) == null ? 0 : 1) <= 1 + ) + error_message = "Only one health check type can be configured at a time." + } +} + +variable "instance_template" { + description = "Instance template for the default version." + type = string } variable "location" { - description = "Compute zone, or region if `regional` is set to true." + description = "Compute zone or region." type = string } + variable "name" { description = "Managed group name."
type = string @@ -81,41 +197,30 @@ variable "project_id" { type = string } -variable "regional" { - description = "Use regional instance group. When set, `location` should be set to the region." - type = bool - default = false +variable "stateful_disks" { + description = "Stateful disk configuration applied at the MIG level to all instances, in device name => on permanent instance delete rule as boolean." + type = map(bool) + default = {} + nullable = false } variable "stateful_config" { - description = "Stateful configuration can be done by individual instances or for all instances in the MIG. They key in per_instance_config is the name of the specific instance. The key of the stateful_disks is the 'device_name' field of the resource. Please note that device_name is defined at the OS mount level, unlike the disk name." - type = object({ - per_instance_config = map(object({ - #name is the key - #name = string - stateful_disks = map(object({ - #device_name is the key - source = string - mode = string # READ_WRITE | READ_ONLY - delete_rule = string # NEVER | ON_PERMANENT_INSTANCE_DELETION - })) - metadata = map(string) - update_config = object({ - minimal_action = string # NONE | REPLACE | RESTART | REFRESH - most_disruptive_allowed_action = string # REPLACE | RESTART | REFRESH | NONE - remove_instance_state_on_destroy = bool - }) + description = "Stateful configuration for individual instances." + type = map(object({ + minimal_action = optional(string) + most_disruptive_action = optional(string) + remove_state_on_destroy = optional(bool) + preserved_state = optional(object({ + disks = optional(map(object({ + source = string + delete_on_instance_deletion = optional(bool) + read_only = optional(bool) + }))) + metadata = optional(map(string)) })) - - mig_config = object({ - stateful_disks = map(object({ - #device_name is the key - delete_rule = string # NEVER | ON_PERMANENT_INSTANCE_DELETION - })) - }) - - }) - default = null + })) + default = {} + nullable = false } variable "target_pools" { @@ -131,32 +236,44 @@ variable "target_size" { } variable "update_policy" { - description = "Update policy. Type can be 'OPPORTUNISTIC' or 'PROACTIVE', action 'REPLACE' or 'restart', surge type 'fixed' or 'percent'." + description = "Update policy. Minimal action and type are required." type = object({ - instance_redistribution_type = optional(string, "PROACTIVE") # NONE | PROACTIVE. The attribute is ignored if regional is set to false. - max_surge_type = string # fixed | percent - max_surge = number - max_unavailable_type = string - max_unavailable = number - minimal_action = string # REPLACE | RESTART - min_ready_sec = number - type = string # OPPORTUNISTIC | PROACTIVE + minimal_action = string + type = string + max_surge = optional(object({ + fixed = optional(number) + percent = optional(number) + })) + max_unavailable = optional(object({ + fixed = optional(number) + percent = optional(number) + })) + min_ready_sec = optional(number) + most_disruptive_action = optional(string) + regional_redistribution_type = optional(string) + replacement_method = optional(string) }) default = null } variable "versions" { - description = "Additional application versions, target_type is either 'fixed' or 'percent'." + description = "Additional application versions, target_size is optional." 
type = map(object({ instance_template = string - target_type = string # fixed | percent - target_size = number + target_size = optional(object({ + fixed = optional(number) + percent = optional(number) + })) })) - default = null + default = {} + nullable = false } variable "wait_for_instances" { description = "Wait for all instances to be created/updated before returning." - type = bool - default = null + type = object({ + enabled = bool + status = optional(string) + }) + default = null } diff --git a/modules/compute-mig/versions.tf b/modules/compute-mig/versions.tf index b1c8c910..286536a6 100644 --- a/modules/compute-mig/versions.tf +++ b/modules/compute-mig/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/compute-vm/versions.tf b/modules/compute-vm/versions.tf index b1c8c910..286536a6 100644 --- a/modules/compute-vm/versions.tf +++ b/modules/compute-vm/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/container-registry/versions.tf b/modules/container-registry/versions.tf index b1c8c910..286536a6 100644 --- a/modules/container-registry/versions.tf +++ b/modules/container-registry/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/data-catalog-policy-tag/versions.tf b/modules/data-catalog-policy-tag/versions.tf index b1c8c910..286536a6 100644 --- a/modules/data-catalog-policy-tag/versions.tf +++ b/modules/data-catalog-policy-tag/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/datafusion/versions.tf b/modules/datafusion/versions.tf index b1c8c910..286536a6 100644 --- a/modules/datafusion/versions.tf +++ b/modules/datafusion/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/dns/README.md b/modules/dns/README.md index cfb8844f..62b38efc 100644 --- a/modules/dns/README.md +++ b/modules/dns/README.md @@ -17,10 +17,11 @@ module "private-dns" { domain = "test.example." 
client_networks = [var.vpc.self_link] recordsets = { - "A localhost" = { ttl = 300, records = ["127.0.0.1"] } + "A localhost" = { records = ["127.0.0.1"] } + "A myhost" = { ttl = 600, records = ["10.0.0.120"] } } } -# tftest modules=1 resources=2 +# tftest modules=1 resources=3 ``` ### Forwarding Zone @@ -52,26 +53,58 @@ module "private-dns" { } # tftest modules=1 resources=1 ``` + +### Routing Policies + +```hcl +module "private-dns" { + source = "./fabric/modules/dns" + project_id = "myproject" + type = "private" + name = "test-example" + domain = "test.example." + client_networks = [var.vpc.self_link] + recordsets = { + "A regular" = { records = ["10.20.0.1"] } + "A geo" = { + geo_routing = [ + { location = "europe-west1", records = ["10.0.0.1"] }, + { location = "europe-west2", records = ["10.0.0.2"] }, + { location = "europe-west3", records = ["10.0.0.3"] } + ] + } + + "A wrr" = { + ttl = 600 + wrr_routing = [ + { weight = 0.6, records = ["10.10.0.1"] }, + { weight = 0.2, records = ["10.10.0.2"] }, + { weight = 0.2, records = ["10.10.0.3"] } + ] + } + } +} +# tftest modules=1 resources=4 +``` ## Variables | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [domain](variables.tf#L51) | Zone domain, must end with a period. | string | ✓ | | -| [name](variables.tf#L62) | Zone name, must be unique within the project. | string | ✓ | | -| [project_id](variables.tf#L73) | Project id for the zone. | string | ✓ | | +| [domain](variables.tf#L54) | Zone domain, must end with a period. | string | ✓ | | +| [name](variables.tf#L72) | Zone name, must be unique within the project. | string | ✓ | | +| [project_id](variables.tf#L83) | Project id for the zone. | string | ✓ | | | [client_networks](variables.tf#L21) | List of VPC self links that can see this zone. | list(string) | | [] | -| [default_key_specs_key](variables.tf#L27) | DNSSEC default key signing specifications: algorithm, key_length, key_type, kind. | any | | {} | -| [default_key_specs_zone](variables.tf#L33) | DNSSEC default zone signing specifications: algorithm, key_length, key_type, kind. | any | | {} | -| [description](variables.tf#L39) | Domain description. | string | | "Terraform managed." | -| [dnssec_config](variables.tf#L45) | DNSSEC configuration: kind, non_existence, state. | any | | {} | -| [forwarders](variables.tf#L56) | Map of {IPV4_ADDRESS => FORWARDING_PATH} for 'forwarding' zone types. Path can be 'default', 'private', or null for provider default. | map(string) | | {} | -| [peer_network](variables.tf#L67) | Peering network self link, only valid for 'peering' zone types. | string | | null | -| [recordsets](variables.tf#L78) | Map of DNS recordsets in \"type name\" => {ttl, [records]} format. | map(object({…})) | | {} | -| [service_directory_namespace](variables.tf#L94) | Service directory namespace id (URL), only valid for 'service-directory' zone types. | string | | null | -| [type](variables.tf#L100) | Type of zone to create, valid values are 'public', 'private', 'forwarding', 'peering', 'service-directory'. | string | | "private" | -| [zone_create](variables.tf#L110) | Create zone. When set to false, uses a data source to reference existing zone. | bool | | true | +| [description](variables.tf#L28) | Domain description. | string | | "Terraform managed." | +| [dnssec_config](variables.tf#L34) | DNSSEC configuration for this zone. | object({…}) | | {…} | +| [enable_logging](variables.tf#L65) | Enable query logging for this zone. Only valid for public zones. 
| bool | | false | +| [forwarders](variables.tf#L59) | Map of {IPV4_ADDRESS => FORWARDING_PATH} for 'forwarding' zone types. Path can be 'default', 'private', or null for provider default. | map(string) | | {} | +| [peer_network](variables.tf#L77) | Peering network self link, only valid for 'peering' zone types. | string | | null | +| [recordsets](variables.tf#L88) | Map of DNS recordsets in \"type name\" => {ttl, [records]} format. | map(object({…})) | | {} | +| [service_directory_namespace](variables.tf#L123) | Service directory namespace id (URL), only valid for 'service-directory' zone types. | string | | null | +| [type](variables.tf#L129) | Type of zone to create, valid values are 'public', 'private', 'forwarding', 'peering', 'service-directory'. | string | | "private" | +| [zone_create](variables.tf#L139) | Create zone. When set to false, uses a data source to reference existing zone. | bool | | true | ## Outputs diff --git a/modules/dns/main.tf b/modules/dns/main.tf index 3eec6036..c1687761 100644 --- a/modules/dns/main.tf +++ b/modules/dns/main.tf @@ -15,11 +15,42 @@ */ locals { - _recordsets = var.recordsets == null ? {} : var.recordsets - recordsets = { - for key, attrs in local._recordsets : + # split record name and type and set as keys in a map + _recordsets_0 = { + for key, attrs in var.recordsets : key => merge(attrs, zipmap(["type", "name"], split(" ", key))) } + # compute the final resource name for the recordset + _recordsets = { + for key, attrs in local._recordsets_0 : + key => merge(attrs, { + resource_name = ( + attrs.name == "" + ? var.domain + : ( + substr(attrs.name, -1, 1) == "." + ? attrs.name + : "${attrs.name}.${var.domain}" + ) + ) + }) + } + # split recordsets between regular, geo and wrr + geo_recordsets = { + for k, v in local._recordsets : + k => v + if v.geo_routing != null + } + regular_recordsets = { + for k, v in local._recordsets : + k => v + if v.records != null + } + wrr_recordsets = { + for k, v in local._recordsets : + k => v + if v.wrr_routing != null + } zone = ( var.zone_create ? try( @@ -117,24 +148,25 @@ resource "google_dns_managed_zone" "public" { visibility = "public" dynamic "dnssec_config" { - for_each = var.dnssec_config == {} ? [] : tolist([var.dnssec_config]) + for_each = var.dnssec_config == null ? 
[] : [1] iterator = config content { - kind = lookup(config.value, "kind", "dns#managedZoneDnsSecConfig") - non_existence = lookup(config.value, "non_existence", "nsec3") - state = lookup(config.value, "state", "off") + kind = "dns#managedZoneDnsSecConfig" + non_existence = var.dnssec_config.non_existence + state = var.dnssec_config.state default_key_specs { - algorithm = lookup(var.default_key_specs_key, "algorithm", "rsasha256") - key_length = lookup(var.default_key_specs_key, "key_length", 2048) - key_type = lookup(var.default_key_specs_key, "key_type", "keySigning") - kind = lookup(var.default_key_specs_key, "kind", "dns#dnsKeySpec") + algorithm = var.dnssec_config.key_signing_key.algorithm + key_length = var.dnssec_config.key_signing_key.key_length + key_type = "keySigning" + kind = "dns#dnsKeySpec" } + default_key_specs { - algorithm = lookup(var.default_key_specs_zone, "algorithm", "rsasha256") - key_length = lookup(var.default_key_specs_zone, "key_length", 1024) - key_type = lookup(var.default_key_specs_zone, "key_type", "zoneSigning") - kind = lookup(var.default_key_specs_zone, "kind", "dns#dnsKeySpec") + algorithm = var.dnssec_config.zone_signing_key.algorithm + key_length = var.dnssec_config.zone_signing_key.key_length + key_type = "zoneSigning" + kind = "dns#dnsKeySpec" } } } @@ -149,23 +181,72 @@ data "google_dns_keys" "dns_keys" { resource "google_dns_record_set" "cloud-static-records" { for_each = ( var.type == "public" || var.type == "private" - ? local.recordsets + ? local.regular_recordsets : {} ) project = var.project_id managed_zone = var.name - name = ( - each.value.name == "" - ? var.domain - : ( - substr(each.value.name, -1, 1) == "." - ? each.value.name - : "${each.value.name}.${var.domain}" - ) - ) - type = each.value.type - ttl = each.value.ttl - rrdatas = each.value.records + name = each.value.resource_name + type = each.value.type + ttl = each.value.ttl + rrdatas = each.value.records + + depends_on = [ + google_dns_managed_zone.non-public, google_dns_managed_zone.public + ] +} + +resource "google_dns_record_set" "cloud-geo-records" { + for_each = ( + var.type == "public" || var.type == "private" + ? local.geo_recordsets + : {} + ) + project = var.project_id + managed_zone = var.name + name = each.value.resource_name + type = each.value.type + ttl = each.value.ttl + + routing_policy { + dynamic "geo" { + for_each = each.value.geo_routing + iterator = policy + content { + location = policy.value.location + rrdatas = policy.value.records + } + } + } + + depends_on = [ + google_dns_managed_zone.non-public, google_dns_managed_zone.public + ] +} + +resource "google_dns_record_set" "cloud-wrr-records" { + for_each = ( + var.type == "public" || var.type == "private" + ? local.wrr_recordsets + : {} + ) + project = var.project_id + managed_zone = var.name + name = each.value.resource_name + type = each.value.type + ttl = each.value.ttl + + routing_policy { + dynamic "wrr" { + for_each = each.value.wrr_routing + iterator = policy + content { + weight = policy.value.weight + rrdatas = policy.value.records + } + } + } + depends_on = [ google_dns_managed_zone.non-public, google_dns_managed_zone.public ] diff --git a/modules/dns/variables.tf b/modules/dns/variables.tf index ba44c7d8..aafe6a1d 100644 --- a/modules/dns/variables.tf +++ b/modules/dns/variables.tf @@ -22,18 +22,7 @@ variable "client_networks" { description = "List of VPC self links that can see this zone." 
type = list(string) default = [] -} - -variable "default_key_specs_key" { - description = "DNSSEC default key signing specifications: algorithm, key_length, key_type, kind." - type = any - default = {} -} - -variable "default_key_specs_zone" { - description = "DNSSEC default zone signing specifications: algorithm, key_length, key_type, kind." - type = any - default = {} + nullable = false } variable "description" { @@ -43,9 +32,23 @@ variable "description" { } variable "dnssec_config" { - description = "DNSSEC configuration: kind, non_existence, state." - type = any - default = {} + description = "DNSSEC configuration for this zone." + type = object({ + non_existence = optional(string, "nsec3") + state = string + key_signing_key = optional(object( + { algorithm = string, key_length = number }), + { algorithm = "rsasha256", key_length = 2048 } + ) + zone_signing_key = optional(object( + { algorithm = string, key_length = number }), + { algorithm = "rsasha256", key_length = 1024 } + ) + }) + default = { + state = "off" + } + nullable = false } variable "domain" { @@ -59,6 +62,13 @@ variable "forwarders" { default = {} } +variable "enable_logging" { + description = "Enable query logging for this zone. Only valid for public zones." + type = bool + default = false + nullable = false +} + variable "name" { description = "Zone name, must be unique within the project." type = string @@ -78,17 +88,36 @@ variable "project_id" { variable "recordsets" { description = "Map of DNS recordsets in \"type name\" => {ttl, [records]} format." type = map(object({ - ttl = number - records = list(string) + ttl = optional(number, 300) + records = optional(list(string)) + geo_routing = optional(list(object({ + location = string + records = list(string) + }))) + wrr_routing = optional(list(object({ + weight = number + records = list(string) + }))) })) - default = {} + default = {} + nullable = false validation { condition = alltrue([ - for k, v in var.recordsets == null ? {} : var.recordsets : + for k, v in coalesce(var.recordsets, {}) : length(split(" ", k)) == 2 ]) error_message = "Recordsets must have keys in the format \"type name\"." } + validation { + condition = alltrue([ + for k, v in coalesce(var.recordsets, {}) : ( + (v.records != null && v.wrr_routing == null && v.geo_routing == null) || + (v.records == null && v.wrr_routing != null && v.geo_routing == null) || + (v.records == null && v.wrr_routing == null && v.geo_routing != null) + ) + ]) + error_message = "Only one of records, wrr_routing or geo_routing can be defined for each recordset." 
+ } } variable "service_directory_namespace" { diff --git a/modules/dns/versions.tf b/modules/dns/versions.tf index b1c8c910..286536a6 100644 --- a/modules/dns/versions.tf +++ b/modules/dns/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/endpoints/versions.tf b/modules/endpoints/versions.tf index b1c8c910..286536a6 100644 --- a/modules/endpoints/versions.tf +++ b/modules/endpoints/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/folder/README.md b/modules/folder/README.md index 8f4c1bc3..2190eaac 100644 --- a/modules/folder/README.md +++ b/modules/folder/README.md @@ -26,27 +26,59 @@ module "folder" { ### Organization policies +To manage organization policies, the `orgpolicy.googleapis.com` service should be enabled in the quota project. + ```hcl module "folder" { source = "./fabric/modules/folder" parent = "organizations/1234567890" name = "Folder name" - policy_boolean = { - "constraints/compute.disableGuestAttributesAccess" = true - "constraints/compute.skipDefaultNetworkCreation" = true - } - policy_list = { + org_policies = { + "compute.disableGuestAttributesAccess" = { + enforce = true + } + "constraints/compute.skipDefaultNetworkCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyUpload" = { + enforce = false + rules = [ + { + condition = { + expression = "resource.matchTagId(\"tagKeys/1234\", \"tagValues/1234\")" + title = "condition" + description = "test condition" + location = "somewhere" + } + enforce = true + } + ] + } + "constraints/iam.allowedPolicyMemberDomains" = { + allow = { + values = ["C0xxxxxxx", "C0yyyyyyy"] + } + } "constraints/compute.trustedImageProjects" = { - inherit_from_parent = null - suggested_value = null - status = true - values = ["projects/my-project"] + allow = { + values = ["projects/my-project"] + } + } + "constraints/compute.vmExternalIpAccess" = { + deny = { all = true } } } } -# tftest modules=1 resources=4 +# tftest modules=1 resources=8 ``` +### Organization policy factory + +See the [organization policy factory in the project module](../project#organization-policy-factory). + ### Firewall policy factory In the same way as for the [organization](../organization) module, the in-built factory allows you to define a single policy, using one file for rules, and an optional file for CIDR range substitution variables. Remember that non-absolute paths are relative to the root module (the folder where you run `terraform`). @@ -259,7 +291,7 @@ module "folder" { | [iam.tf](./iam.tf) | IAM bindings, roles and audit logging resources. | google_folder_iam_binding · google_folder_iam_member | | [logging.tf](./logging.tf) | Log sinks and supporting resources. | google_bigquery_dataset_iam_member · google_logging_folder_exclusion · google_logging_folder_sink · google_project_iam_member · google_pubsub_topic_iam_member · google_storage_bucket_iam_member | | [main.tf](./main.tf) | Module-level locals and resources. 
| google_essential_contacts_contact · google_folder | -| [organization-policies.tf](./organization-policies.tf) | Folder-level organization policies. | google_folder_organization_policy | +| [organization-policies.tf](./organization-policies.tf) | Folder-level organization policies. | google_org_policy_policy | | [outputs.tf](./outputs.tf) | Module outputs. | | | [tags.tf](./tags.tf) | None | google_tags_tag_binding | | [variables.tf](./variables.tf) | Module variables. | | @@ -282,10 +314,10 @@ module "folder" { | [logging_exclusions](variables.tf#L98) | Logging exclusions for this folder in the form {NAME -> FILTER}. | map(string) | | {} | | [logging_sinks](variables.tf#L105) | Logging sinks to create for this folder. | map(object({…})) | | {} | | [name](variables.tf#L126) | Folder name. | string | | null | -| [parent](variables.tf#L132) | Parent in folders/folder_id or organizations/org_id format. | string | | null | -| [policy_boolean](variables.tf#L142) | Map of boolean org policies and enforcement value, set value to null for policy restore. | map(bool) | | {} | -| [policy_list](variables.tf#L149) | Map of list org policies, status is true for allow, false for deny, null for restore. Values can only be used for allow or deny. | map(object({…})) | | {} | -| [tag_bindings](variables.tf#L161) | Tag bindings for this folder, in key => tag value id format. | map(string) | | null | +| [org_policies](variables.tf#L132) | Organization policies applied to this folder keyed by policy name. | map(object({…})) | | {} | +| [org_policies_data_path](variables.tf#L172) | Path containing org policies in YAML format. | string | | null | +| [parent](variables.tf#L178) | Parent in folders/folder_id or organizations/org_id format. | string | | null | +| [tag_bindings](variables.tf#L188) | Tag bindings for this folder, in key => tag value id format. | map(string) | | null | ## Outputs @@ -295,7 +327,7 @@ module "folder" { | [firewall_policy_id](outputs.tf#L21) | Map of firewall policy ids created in this folder. | | | [folder](outputs.tf#L26) | Folder resource. | | | [id](outputs.tf#L31) | Folder id. | | -| [name](outputs.tf#L41) | Folder name. | | -| [sink_writer_identities](outputs.tf#L46) | Writer identities created for each sink. | | +| [name](outputs.tf#L40) | Folder name. | | +| [sink_writer_identities](outputs.tf#L45) | Writer identities created for each sink. | | diff --git a/modules/folder/organization-policies.tf b/modules/folder/organization-policies.tf index 177a3d80..3766005a 100644 --- a/modules/folder/organization-policies.tf +++ b/modules/folder/organization-policies.tf @@ -16,75 +16,131 @@ # tfdoc:file:description Folder-level organization policies. -resource "google_folder_organization_policy" "boolean" { - for_each = var.policy_boolean - folder = local.folder.name - constraint = each.key +locals { + _factory_data_raw = ( + var.org_policies_data_path == null + ? tomap({}) + : merge([ + for f in fileset(var.org_policies_data_path, "*.yaml") : + yamldecode(file("${var.org_policies_data_path}/${f}")) + ]...) + ) - dynamic "boolean_policy" { - for_each = each.value == null ? [] : [each.value] - iterator = policy - content { - enforced = policy.value + # simulate applying defaults to data coming from yaml files + _factory_data = { + for k, v in local._factory_data_raw : + k => { + inherit_from_parent = try(v.inherit_from_parent, null) + reset = try(v.reset, null) + allow = can(v.allow) ? { + all = try(v.allow.all, null) + values = try(v.allow.values, null) + } : null + deny = can(v.deny) ? 
{ + all = try(v.deny.all, null) + values = try(v.deny.values, null) + } : null + enforce = try(v.enforce, true) + + rules = [ + for r in try(v.rules, []) : { + allow = can(r.allow) ? { + all = try(r.allow.all, null) + values = try(r.allow.values, null) + } : null + deny = can(r.deny) ? { + all = try(r.deny.all, null) + values = try(r.deny.values, null) + } : null + enforce = try(r.enforce, true) + condition = { + description = try(r.condition.description, null) + expression = try(r.condition.expression, null) + location = try(r.condition.location, null) + title = try(r.condition.title, null) + } + } + ] } } - dynamic "restore_policy" { - for_each = each.value == null ? [""] : [] - content { - default = true - } + _org_policies = merge(local._factory_data, var.org_policies) + + org_policies = { + for k, v in local._org_policies : + k => merge(v, { + name = "${local.folder.name}/policies/${k}" + parent = local.folder.name + + is_boolean_policy = v.allow == null && v.deny == null + has_values = ( + length(coalesce(try(v.allow.values, []), [])) > 0 || + length(coalesce(try(v.deny.values, []), [])) > 0 + ) + rules = [ + for r in v.rules : + merge(r, { + has_values = ( + length(coalesce(try(r.allow.values, []), [])) > 0 || + length(coalesce(try(r.deny.values, []), [])) > 0 + ) + }) + ] + }) } } -resource "google_folder_organization_policy" "list" { - for_each = var.policy_list - folder = local.folder.name - constraint = each.key +resource "google_org_policy_policy" "default" { + for_each = local.org_policies + name = each.value.name + parent = each.value.parent - dynamic "list_policy" { - for_each = each.value.status == null ? [] : [each.value] - iterator = policy - content { - inherit_from_parent = policy.value.inherit_from_parent - suggested_value = policy.value.suggested_value - dynamic "allow" { - for_each = policy.value.status ? [""] : [] + spec { + inherit_from_parent = each.value.inherit_from_parent + reset = each.value.reset + + rules { + allow_all = try(each.value.allow.all, null) == true ? "TRUE" : null + deny_all = try(each.value.deny.all, null) == true ? "TRUE" : null + enforce = ( + each.value.is_boolean_policy && each.value.enforce != null + ? upper(tostring(each.value.enforce)) + : null + ) + dynamic "values" { + for_each = each.value.has_values ? [1] : [] content { - values = ( - try(length(policy.value.values) > 0, false) - ? policy.value.values - : null - ) - all = ( - try(length(policy.value.values) > 0, false) - ? null - : true - ) + allowed_values = try(each.value.allow.values, null) + denied_values = try(each.value.deny.values, null) } } - dynamic "deny" { - for_each = policy.value.status ? [] : [""] - content { - values = ( - try(length(policy.value.values) > 0, false) - ? policy.value.values - : null - ) - all = ( - try(length(policy.value.values) > 0, false) - ? null - : true - ) + } + + dynamic "rules" { + for_each = each.value.rules + iterator = rule + content { + allow_all = try(rule.value.allow.all, false) == true ? "TRUE" : null + deny_all = try(rule.value.deny.all, false) == true ? "TRUE" : null + enforce = ( + each.value.is_boolean_policy && rule.value.enforce != null + ? upper(tostring(rule.value.enforce)) + : null + ) + condition { + description = rule.value.condition.description + expression = rule.value.condition.expression + location = rule.value.condition.location + title = rule.value.condition.title + } + dynamic "values" { + for_each = rule.value.has_values ? 
[1] : [] + content { + allowed_values = try(rule.value.allow.values, null) + denied_values = try(rule.value.deny.values, null) + } } } } } - - dynamic "restore_policy" { - for_each = each.value.status == null ? [true] : [] - content { - default = true - } - } } diff --git a/modules/folder/outputs.tf b/modules/folder/outputs.tf index 37babc6f..8073951b 100644 --- a/modules/folder/outputs.tf +++ b/modules/folder/outputs.tf @@ -33,8 +33,7 @@ output "id" { value = local.folder.name depends_on = [ google_folder_iam_binding.authoritative, - google_folder_organization_policy.boolean, - google_folder_organization_policy.list + google_org_policy_policy.default, ] } diff --git a/modules/folder/variables.tf b/modules/folder/variables.tf index 19ed18f3..359531b7 100644 --- a/modules/folder/variables.tf +++ b/modules/folder/variables.tf @@ -129,6 +129,52 @@ variable "name" { default = null } +variable "org_policies" { + description = "Organization policies applied to this folder keyed by policy name." + type = map(object({ + inherit_from_parent = optional(bool) # for list policies only. + reset = optional(bool) + + # default (unconditional) values + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + + # conditional values + rules = optional(list(object({ + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + condition = object({ + description = optional(string) + expression = optional(string) + location = optional(string) + title = optional(string) + }) + })), []) + })) + default = {} + nullable = false +} + +variable "org_policies_data_path" { + description = "Path containing org policies in YAML format." + type = string + default = null +} + variable "parent" { description = "Parent in folders/folder_id or organizations/org_id format." type = string @@ -139,25 +185,6 @@ variable "parent" { } } -variable "policy_boolean" { - description = "Map of boolean org policies and enforcement value, set value to null for policy restore." - type = map(bool) - default = {} - nullable = false -} - -variable "policy_list" { - description = "Map of list org policies, status is true for allow, false for deny, null for restore. Values can only be used for allow or deny." - type = map(object({ - inherit_from_parent = bool - suggested_value = string - status = bool - values = list(string) - })) - default = {} - nullable = false -} - variable "tag_bindings" { description = "Tag bindings for this folder, in key => tag value id format." 
type = map(string) diff --git a/modules/folder/versions.tf b/modules/folder/versions.tf index b1c8c910..286536a6 100644 --- a/modules/folder/versions.tf +++ b/modules/folder/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/gcs/versions.tf b/modules/gcs/versions.tf index b1c8c910..286536a6 100644 --- a/modules/gcs/versions.tf +++ b/modules/gcs/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/gke-cluster/README.md b/modules/gke-cluster/README.md index be0a9f62..55b594c6 100644 --- a/modules/gke-cluster/README.md +++ b/modules/gke-cluster/README.md @@ -77,9 +77,9 @@ module "cluster-1" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [location](variables.tf#L117) | Cluster zone or region. | string | ✓ | | -| [name](variables.tf#L169) | Cluster name. | string | ✓ | | -| [project_id](variables.tf#L195) | Cluster project id. | string | ✓ | | -| [vpc_config](variables.tf#L206) | VPC-level configuration. | object({…}) | ✓ | | +| [name](variables.tf#L174) | Cluster name. | string | ✓ | | +| [project_id](variables.tf#L200) | Cluster project id. | string | ✓ | | +| [vpc_config](variables.tf#L211) | VPC-level configuration. | object({…}) | ✓ | | | [cluster_autoscaling](variables.tf#L17) | Enable and configure limits for Node Auto-Provisioning with Cluster Autoscaler. | object({…}) | | null | | [description](variables.tf#L38) | Cluster description. | string | | null | | [enable_addons](variables.tf#L44) | Addons enabled in the cluster (true means enabled). | object({…}) | | {…} | @@ -90,10 +90,10 @@ module "cluster-1" { | [maintenance_config](variables.tf#L128) | Maintenance window configuration. | object({…}) | | {…} | | [max_pods_per_node](variables.tf#L151) | Maximum number of pods per node in this cluster. | number | | 110 | | [min_master_version](variables.tf#L157) | Minimum version of the master, defaults to the version of the most recent official release. | string | | null | -| [monitoring_config](variables.tf#L163) | Monitoring components. | list(string) | | ["SYSTEM_COMPONENTS"] | -| [node_locations](variables.tf#L174) | Zones in which the cluster's nodes are located. | list(string) | | [] | -| [private_cluster_config](variables.tf#L181) | Private cluster configuration. | object({…}) | | null | -| [release_channel](variables.tf#L200) | Release channel for GKE upgrades. | string | | null | +| [monitoring_config](variables.tf#L163) | Monitoring components. | object({…}) | | {…} | +| [node_locations](variables.tf#L179) | Zones in which the cluster's nodes are located. | list(string) | | [] | +| [private_cluster_config](variables.tf#L186) | Private cluster configuration. | object({…}) | | null | +| [release_channel](variables.tf#L205) | Release channel for GKE upgrades. 
| string | | null | ## Outputs diff --git a/modules/gke-cluster/main.tf b/modules/gke-cluster/main.tf index 9981d9b4..bc94dd37 100644 --- a/modules/gke-cluster/main.tf +++ b/modules/gke-cluster/main.tf @@ -41,7 +41,7 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 remove_default_node_pool = var.enable_features.autopilot ? null : true datapath_provider = ( - var.enable_features.dataplane_v2 + var.enable_features.dataplane_v2 || var.enable_features.autopilot ? "ADVANCED_DATAPATH" : "DATAPATH_PROVIDER_UNSPECIFIED" ) @@ -240,7 +240,15 @@ resource "google_container_cluster" "cluster" { dynamic "monitoring_config" { for_each = var.monitoring_config != null && !var.enable_features.autopilot ? [""] : [] content { - enable_components = var.monitoring_config + enable_components = var.monitoring_config.enable_components + dynamic "managed_prometheus" { + for_each = ( + try(var.monitoring_config.managed_prometheus, null) == true ? [""] : [] + ) + content { + enabled = true + } + } } } diff --git a/modules/gke-cluster/variables.tf b/modules/gke-cluster/variables.tf index a227d5c7..f9a3b69e 100644 --- a/modules/gke-cluster/variables.tf +++ b/modules/gke-cluster/variables.tf @@ -162,8 +162,13 @@ variable "min_master_version" { variable "monitoring_config" { description = "Monitoring components." - type = list(string) - default = ["SYSTEM_COMPONENTS"] + type = object({ + enable_components = optional(list(string)) + managed_prometheus = optional(bool) + }) + default = { + enable_components = ["SYSTEM_COMPONENTS"] + } } variable "name" { diff --git a/modules/gke-cluster/versions.tf b/modules/gke-cluster/versions.tf index b1c8c910..286536a6 100644 --- a/modules/gke-cluster/versions.tf +++ b/modules/gke-cluster/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/gke-hub/README.md b/modules/gke-hub/README.md index 2573ac9d..1a3c547c 100644 --- a/modules/gke-hub/README.md +++ b/modules/gke-hub/README.md @@ -257,7 +257,7 @@ module "cluster_1_nodepool" { location = "europe-west1" name = "nodepool" node_count = { initial = 1 } - service_account = {} + service_account = { create = true } tags = ["cluster-1-node"] } @@ -292,7 +292,7 @@ module "cluster_2_nodepool" { location = "europe-west4" name = "nodepool" node_count = { initial = 1 } - service_account = {} + service_account = { create = true } tags = ["cluster-2-node"] } diff --git a/modules/gke-hub/versions.tf b/modules/gke-hub/versions.tf index b1c8c910..286536a6 100644 --- a/modules/gke-hub/versions.tf +++ b/modules/gke-hub/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/gke-nodepool/README.md b/modules/gke-nodepool/README.md index d464656f..4c471c60 100644 --- a/modules/gke-nodepool/README.md +++ b/modules/gke-nodepool/README.md @@ -21,7 +21,13 @@ module "cluster-1-nodepool-1" { ### Internally managed service account -To have the module auto-create a service account for the nodes, define the `service_account` variable without setting its `email` attribute. 
You can then specify service account scopes, or use the default. The service account resource and email (in both plain and IAM formats) are then available in outputs to assign IAM roles from your own code. +There are three different approaches to defining the nodes service account, all depending on the `service_account` variable where the `create` attribute controls creation of a new service account by this module, and the `email` attribute controls the actual service account to use. + +If you create a new service account, its resource and email (in both plain and IAM formats) are then available in outputs to reference it in other modules or resources. + +#### GCE default service account + +To use the GCE default service account, you can ignore the variable which is equivalent to `{ create = null, email = null }`. ```hcl module "cluster-1-nodepool-1" { @@ -30,7 +36,44 @@ module "cluster-1-nodepool-1" { cluster_name = "cluster-1" location = "europe-west1-b" name = "nodepool-1" - service_account = {} +} +# tftest modules=1 resources=1 +``` + +#### Externally defined service account + +To use an existing service account, pass in just the `email` attribute. + +```hcl +module "cluster-1-nodepool-1" { + source = "./fabric/modules/gke-nodepool" + project_id = "myproject" + cluster_name = "cluster-1" + location = "europe-west1-b" + name = "nodepool-1" + service_account = { + email = "foo-bar@myproject.iam.gserviceaccount.com" + } +} +# tftest modules=1 resources=1 +``` + +#### Auto-created service account + +To have the module create a service account, set the `create` attribute to `true` and optionally pass the desired account id in `email`. + +```hcl +module "cluster-1-nodepool-1" { + source = "./fabric/modules/gke-nodepool" + project_id = "myproject" + cluster_name = "cluster-1" + location = "europe-west1-b" + name = "nodepool-1" + service_account = { + create = true + # optional + email = "spam-eggs" + } } # tftest modules=1 resources=2 ``` @@ -53,10 +96,10 @@ module "cluster-1-nodepool-1" { | [nodepool_config](variables.tf#L109) | Nodepool-level configuration. | object({…}) | | null | | [pod_range](variables.tf#L131) | Pod secondary range configuration. | object({…}) | | null | | [reservation_affinity](variables.tf#L148) | Configuration of the desired reservation which instances could take capacity from. | object({…}) | | null | -| [service_account](variables.tf#L158) | Nodepool service account. If this variable is set to null, the default GCE service account will be used. If set and email is null, a service account will be created. If scopes are null a default will be used. | object({…}) | | null | -| [sole_tenant_nodegroup](variables.tf#L167) | Sole tenant node group. | string | | null | -| [tags](variables.tf#L173) | Network tags applied to nodes. | list(string) | | null | -| [taints](variables.tf#L179) | Kubernetes taints applied to all nodes. | list(object({…})) | | null | +| [service_account](variables.tf#L158) | Nodepool service account. If this variable is set to null, the default GCE service account will be used. If set and email is null, a service account will be created. If scopes are null a default will be used. | object({…}) | | {} | +| [sole_tenant_nodegroup](variables.tf#L169) | Sole tenant node group. | string | | null | +| [tags](variables.tf#L175) | Network tags applied to nodes. | list(string) | | null | +| [taints](variables.tf#L181) | Kubernetes taints applied to all nodes. 
| list(object({…})) | | null | ## Outputs diff --git a/modules/gke-nodepool/main.tf b/modules/gke-nodepool/main.tf index 6a3714f0..0c35c8d0 100644 --- a/modules/gke-nodepool/main.tf +++ b/modules/gke-nodepool/main.tf @@ -31,17 +31,14 @@ locals { ) # if no attributes passed for service account, use the GCE default # if no email specified, create service account - service_account_create = ( - var.service_account != null && try(var.service_account.email, null) == null - ) service_account_email = ( - local.service_account_create + var.service_account.create ? google_service_account.service_account[0].email - : try(var.service_account.email, null) + : var.service_account.email ) service_account_scopes = ( - try(var.service_account.scopes, null) != null - ? var.service_account.scopes + var.service_account.oauth_scopes != null + ? var.service_account.oauth_scopes : [ "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", @@ -60,9 +57,13 @@ locals { } resource "google_service_account" "service_account" { - count = local.service_account_create ? 1 : 0 - project = var.project_id - account_id = "tf-gke-${var.name}" + count = var.service_account.create ? 1 : 0 + project = var.project_id + account_id = ( + var.service_account.email != null + ? split("@", var.service_account.email)[0] + : "tf-gke-${var.name}" + ) display_name = "Terraform GKE ${var.cluster_name} ${var.name}." } diff --git a/modules/gke-nodepool/variables.tf b/modules/gke-nodepool/variables.tf index dec5b823..15c8a151 100644 --- a/modules/gke-nodepool/variables.tf +++ b/modules/gke-nodepool/variables.tf @@ -158,10 +158,12 @@ variable "reservation_affinity" { variable "service_account" { description = "Nodepool service account. If this variable is set to null, the default GCE service account will be used. If set and email is null, a service account will be created. If scopes are null a default will be used." 
type = object({ - email = optional(string) - oauth_scopes = optional(list(string)) + create = optional(bool, false) + email = optional(string, null) + oauth_scopes = optional(list(string), null) }) - default = null + default = {} + nullable = false } variable "sole_tenant_nodegroup" { diff --git a/modules/gke-nodepool/versions.tf b/modules/gke-nodepool/versions.tf index b1c8c910..286536a6 100644 --- a/modules/gke-nodepool/versions.tf +++ b/modules/gke-nodepool/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/iam-service-account/versions.tf b/modules/iam-service-account/versions.tf index b1c8c910..286536a6 100644 --- a/modules/iam-service-account/versions.tf +++ b/modules/iam-service-account/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/kms/versions.tf b/modules/kms/versions.tf index b1c8c910..286536a6 100644 --- a/modules/kms/versions.tf +++ b/modules/kms/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/logging-bucket/versions.tf b/modules/logging-bucket/versions.tf index b1c8c910..286536a6 100644 --- a/modules/logging-bucket/versions.tf +++ b/modules/logging-bucket/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-address/versions.tf b/modules/net-address/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-address/versions.tf +++ b/modules/net-address/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-cloudnat/versions.tf b/modules/net-cloudnat/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-cloudnat/versions.tf +++ b/modules/net-cloudnat/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-glb/versions.tf b/modules/net-glb/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-glb/versions.tf +++ b/modules/net-glb/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # 
tftest } } } diff --git a/modules/net-ilb-l7/versions.tf b/modules/net-ilb-l7/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-ilb-l7/versions.tf +++ b/modules/net-ilb-l7/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-ilb/README.md b/modules/net-ilb/README.md index 5637f0ef..9916f736 100644 --- a/modules/net-ilb/README.md +++ b/modules/net-ilb/README.md @@ -23,11 +23,13 @@ module "ilb" { region = "europe-west1" name = "ilb-test" service_label = "ilb-test" - network = var.vpc.self_link - subnetwork = var.subnet.self_link + vpc_config = { + network = var.vpc.self_link + subnetwork = var.subnet.self_link + } group_configs = { my-group = { - zone = "europe-west1-b", named_ports = null + zone = "europe-west1-b" instances = [ "instance-1-self-link", "instance-2-self-link" @@ -35,12 +37,12 @@ module "ilb" { } } backends = [{ - failover = false group = module.ilb.groups.my-group.self_link - balancing_mode = "CONNECTION" }] health_check_config = { - type = "http", check = { port = 80 }, config = {}, logging = true + http = { + port = 80 + } } } # tftest modules=1 resources=4 @@ -91,18 +93,21 @@ module "ilb" { region = "europe-west1" name = "ilb-test" service_label = "ilb-test" - network = var.vpc.self_link - subnetwork = var.subnet.self_link + vpc_config = { + network = var.vpc.self_link + subnetwork = var.subnet.self_link + } ports = [80] backends = [ for z, mod in module.instance-group : { - failover = false group = mod.group.self_link - balancing_mode = "CONNECTION" + balancing_mode = "UTILIZATION" } ] health_check_config = { - type = "http", check = { port = 80 }, config = {}, logging = true + http = { + port = 80 + } } } # tftest modules=3 resources=7 @@ -113,37 +118,36 @@ module "ilb" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [backends](variables.tf#L33) | Load balancer backends, balancing mode is one of 'CONNECTION' or 'UTILIZATION'. | list(object({…})) | ✓ | | -| [name](variables.tf#L98) | Name used for all resources. | string | ✓ | | -| [network](variables.tf#L103) | Network used for resources. | string | ✓ | | -| [project_id](variables.tf#L114) | Project id where resources will be created. | string | ✓ | | -| [region](variables.tf#L125) | GCP region. | string | ✓ | | -| [subnetwork](variables.tf#L136) | Subnetwork used for the forwarding rule. | string | ✓ | | +| [name](variables.tf#L184) | Name used for all resources. | string | ✓ | | +| [project_id](variables.tf#L195) | Project id where resources will be created. | string | ✓ | | +| [region](variables.tf#L206) | GCP region. | string | ✓ | | +| [vpc_config](variables.tf#L217) | VPC-level configuration. | object({…}) | ✓ | | | [address](variables.tf#L17) | Optional IP address used for the forwarding rule. | string | | null | -| [backend_config](variables.tf#L23) | Optional backend configuration. | object({…}) | | null | -| [failover_config](variables.tf#L42) | Optional failover configuration. | object({…}) | | null | -| [global_access](variables.tf#L52) | Global access, defaults to false if not set. | bool | | null | -| [group_configs](variables.tf#L58) | Optional unmanaged groups to create. Can be referenced in backends via outputs. 
| map(object({…})) | | {} | -| [health_check](variables.tf#L68) | Name of existing health check to use, disables auto-created health check. | string | | null | -| [health_check_config](variables.tf#L74) | Configuration of the auto-created helth check. | object({…}) | | {…} | -| [labels](variables.tf#L92) | Labels set on resources. | map(string) | | {} | -| [ports](variables.tf#L108) | Comma-separated ports, leave null to use all ports. | list(string) | | null | -| [protocol](variables.tf#L119) | IP protocol used, defaults to TCP. | string | | "TCP" | -| [service_label](variables.tf#L130) | Optional prefix of the fully qualified forwarding rule name. | string | | null | +| [backend_service_config](variables.tf#L23) | Backend service level configuration. | object({…}) | | {} | +| [backends](variables.tf#L56) | Load balancer backends, balancing mode is one of 'CONNECTION' or 'UTILIZATION'. | list(object({…})) | | [] | +| [description](variables.tf#L75) | Optional description used for resources. | string | | "Terraform managed." | +| [global_access](variables.tf#L81) | Global access, defaults to false if not set. | bool | | null | +| [group_configs](variables.tf#L87) | Optional unmanaged groups to create. Can be referenced in backends via outputs. | map(object({…})) | | {} | +| [health_check](variables.tf#L98) | Name of existing health check to use, disables auto-created health check. | string | | null | +| [health_check_config](variables.tf#L104) | Optional auto-created health check configuration, use the output self-link to set it in the auto healing policy. Refer to examples for usage. | object({…}) | | {…} | +| [labels](variables.tf#L178) | Labels set on resources. | map(string) | | {} | +| [ports](variables.tf#L189) | Comma-separated ports, leave null to use all ports. | list(string) | | null | +| [protocol](variables.tf#L200) | IP protocol used, defaults to TCP. | string | | "TCP" | +| [service_label](variables.tf#L211) | Optional prefix of the fully qualified forwarding rule name. | string | | null | ## Outputs | name | description | sensitive | |---|---|:---:| -| [backend](outputs.tf#L17) | Backend resource. | | -| [backend_id](outputs.tf#L22) | Backend id. | | -| [backend_self_link](outputs.tf#L27) | Backend self link. | | +| [backend_service](outputs.tf#L17) | Backend resource. | | +| [backend_service_id](outputs.tf#L22) | Backend id. | | +| [backend_service_self_link](outputs.tf#L27) | Backend self link. | | | [forwarding_rule](outputs.tf#L32) | Forwarding rule resource. | | | [forwarding_rule_address](outputs.tf#L37) | Forwarding rule address. | | | [forwarding_rule_id](outputs.tf#L42) | Forwarding rule id. | | | [forwarding_rule_self_link](outputs.tf#L47) | Forwarding rule self link. | | -| [group_self_links](outputs.tf#L52) | Optional unmanaged instance group self links. | | -| [groups](outputs.tf#L59) | Optional unmanaged instance group resources. | | +| [group_self_links](outputs.tf#L57) | Optional unmanaged instance group self links. | | +| [groups](outputs.tf#L52) | Optional unmanaged instance group resources. | | | [health_check](outputs.tf#L64) | Auto-created health-check resource. | | | [health_check_self_id](outputs.tf#L69) | Auto-created health-check self id. | | | [health_check_self_link](outputs.tf#L74) | Auto-created health-check self link. 
| | diff --git a/modules/net-ilb/groups.tf b/modules/net-ilb/groups.tf new file mode 100644 index 00000000..fe8bf13d --- /dev/null +++ b/modules/net-ilb/groups.tf @@ -0,0 +1,33 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# tfdoc:file:description Optional instance group resources. + +resource "google_compute_instance_group" "unmanaged" { + for_each = var.group_configs + project = var.project_id + zone = each.value.zone + name = each.key + description = "Terraform-managed." + instances = each.value.instances + dynamic "named_port" { + for_each = each.value.named_ports + content { + name = named_port.key + port = named_port.value + } + } +} diff --git a/modules/net-ilb/health-check.tf b/modules/net-ilb/health-check.tf new file mode 100644 index 00000000..4a4ed40d --- /dev/null +++ b/modules/net-ilb/health-check.tf @@ -0,0 +1,119 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +# tfdoc:file:description Health check resource. + +locals { + hc = var.health_check_config + hc_grpc = try(local.hc.grpc, null) != null + hc_http = try(local.hc.http, null) != null + hc_http2 = try(local.hc.http2, null) != null + hc_https = try(local.hc.https, null) != null + hc_ssl = try(local.hc.ssl, null) != null + hc_tcp = try(local.hc.tcp, null) != null +} + +resource "google_compute_health_check" "default" { + provider = google-beta + count = local.hc != null ? 1 : 0 + project = var.project_id + name = var.name + description = local.hc.description + check_interval_sec = local.hc.check_interval_sec + healthy_threshold = local.hc.healthy_threshold + timeout_sec = local.hc.timeout_sec + unhealthy_threshold = local.hc.unhealthy_threshold + + dynamic "grpc_health_check" { + for_each = local.hc_grpc ? [""] : [] + content { + port = local.hc.grpc.port + port_name = local.hc.grpc.port_name + port_specification = local.hc.grpc.port_specification + grpc_service_name = local.hc.grpc.service_name + } + } + + dynamic "http_health_check" { + for_each = local.hc_http ? [""] : [] + content { + host = local.hc.http.host + port = local.hc.http.port + port_name = local.hc.http.port_name + port_specification = local.hc.http.port_specification + proxy_header = local.hc.http.proxy_header + request_path = local.hc.http.request_path + response = local.hc.http.response + } + } + + dynamic "http2_health_check" { + for_each = local.hc_http2 ? 
[""] : [] + content { + host = local.hc.http.host + port = local.hc.http.port + port_name = local.hc.http.port_name + port_specification = local.hc.http.port_specification + proxy_header = local.hc.http.proxy_header + request_path = local.hc.http.request_path + response = local.hc.http.response + } + } + + dynamic "https_health_check" { + for_each = local.hc_https ? [""] : [] + content { + host = local.hc.http.host + port = local.hc.http.port + port_name = local.hc.http.port_name + port_specification = local.hc.http.port_specification + proxy_header = local.hc.http.proxy_header + request_path = local.hc.http.request_path + response = local.hc.http.response + } + } + + dynamic "ssl_health_check" { + for_each = local.hc_ssl ? [""] : [] + content { + port = local.hc.tcp.port + port_name = local.hc.tcp.port_name + port_specification = local.hc.tcp.port_specification + proxy_header = local.hc.tcp.proxy_header + request = local.hc.tcp.request + response = local.hc.tcp.response + } + } + + dynamic "tcp_health_check" { + for_each = local.hc_tcp ? [""] : [] + content { + port = local.hc.tcp.port + port_name = local.hc.tcp.port_name + port_specification = local.hc.tcp.port_specification + proxy_header = local.hc.tcp.proxy_header + request = local.hc.tcp.request + response = local.hc.tcp.response + } + } + + dynamic "log_config" { + for_each = try(local.hc.enable_logging, null) == true ? [""] : [] + content { + enable = true + } + } +} diff --git a/modules/net-ilb/main.tf b/modules/net-ilb/main.tf index aa4addcc..be4c5786 100644 --- a/modules/net-ilb/main.tf +++ b/modules/net-ilb/main.tf @@ -16,252 +16,100 @@ locals { + bs_conntrack = var.backend_service_config.connection_tracking + bs_failover = var.backend_service_config.failover_config health_check = ( var.health_check != null ? var.health_check - : try(local.health_check_resource.self_link, null) + : google_compute_health_check.default.0.self_link ) - health_check_resource = try( - google_compute_health_check.http.0, - google_compute_health_check.https.0, - google_compute_health_check.tcp.0, - google_compute_health_check.ssl.0, - google_compute_health_check.http2.0, - {} - ) - health_check_type = try(var.health_check_config.type, null) } resource "google_compute_forwarding_rule" "default" { - provider = google-beta - project = var.project_id - name = var.name - description = "Terraform managed." + provider = google-beta + project = var.project_id + region = var.region + name = var.name + description = var.description + ip_address = var.address + ip_protocol = var.protocol # TCP | UDP + backend_service = ( + google_compute_region_backend_service.default.self_link + ) load_balancing_scheme = "INTERNAL" - region = var.region - network = var.network - subnetwork = var.subnetwork - ip_address = var.address - ip_protocol = var.protocol # TCP | UDP - ports = var.ports # "nnnnn" or "nnnnn,nnnnn,nnnnn" max 5 - service_label = var.service_label - all_ports = var.ports == null ? true : null + network = var.vpc_config.network + ports = var.ports # "nnnnn" or "nnnnn,nnnnn,nnnnn" max 5 + subnetwork = var.vpc_config.subnetwork allow_global_access = var.global_access - backend_service = google_compute_region_backend_service.default.self_link + labels = var.labels + all_ports = var.ports == null ? 
true : null + service_label = var.service_label # is_mirroring_collector = false - labels = var.labels } resource "google_compute_region_backend_service" "default" { - provider = google-beta - project = var.project_id - name = var.name - description = "Terraform managed." - load_balancing_scheme = "INTERNAL" - region = var.region - network = var.network - health_checks = [local.health_check] - protocol = var.protocol - - session_affinity = try(var.backend_config.session_affinity, null) - timeout_sec = try(var.backend_config.timeout_sec, null) - connection_draining_timeout_sec = try(var.backend_config.connection_draining_timeout_sec, null) + provider = google-beta + project = var.project_id + region = var.region + name = var.name + description = var.description + load_balancing_scheme = "INTERNAL" + protocol = var.protocol + network = var.vpc_config.network + health_checks = [local.health_check] + connection_draining_timeout_sec = var.backend_service_config.connection_draining_timeout_sec + session_affinity = var.backend_service_config.session_affinity + timeout_sec = var.backend_service_config.timeout_sec dynamic "backend" { for_each = { for b in var.backends : b.group => b } - iterator = backend content { balancing_mode = backend.value.balancing_mode - description = "Terraform managed." + description = backend.value.description failover = backend.value.failover group = backend.key } } + dynamic "connection_tracking_policy" { + for_each = local.bs_conntrack == null ? [] : [""] + content { + connection_persistence_on_unhealthy_backends = ( + local.bs_conntrack.persist_conn_on_unhealthy != null + ? local.bs_conntrack.persist_conn_on_unhealthy + : null + ) + idle_timeout_sec = local.bs_conntrack.idle_timeout_sec + tracking_mode = ( + local.bs_conntrack.track_per_session != null + ? local.bs_conntrack.track_per_session + : null + ) + } + } + dynamic "failover_policy" { - for_each = var.failover_config == null ? [] : [var.failover_config] - iterator = config + for_each = local.bs_failover == null ? [] : [""] content { - disable_connection_drain_on_failover = config.value.disable_connection_drain - drop_traffic_if_unhealthy = config.value.drop_traffic_if_unhealthy - failover_ratio = config.value.ratio + disable_connection_drain_on_failover = local.bs_failover.disable_conn_drain + drop_traffic_if_unhealthy = local.bs_failover.drop_traffic_if_unhealthy + failover_ratio = local.bs_failover.ratio } } -} - -resource "google_compute_instance_group" "unmanaged" { - for_each = var.group_configs - project = var.project_id - zone = each.value.zone - name = each.key - description = "Terraform-managed." - instances = each.value.instances - dynamic "named_port" { - for_each = each.value.named_ports != null ? each.value.named_ports : {} - iterator = config - content { - name = config.key - port = config.value - } - } -} - -resource "google_compute_health_check" "http" { - provider = google-beta - count = ( - var.health_check == null && local.health_check_type == "http" ? 1 : 0 - ) - project = var.project_id - name = var.name - description = "Terraform managed." 
- - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - http_health_check { - host = try(var.health_check_config.check.host, null) - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request_path = try(var.health_check_config.check.request_path, null) - response = try(var.health_check_config.check.response, null) - } - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] + for_each = var.backend_service_config.log_sample_rate == null ? [] : [""] content { - enable = true + enable = true + sample_rate = var.backend_service_config.log_sample_rate } } -} -resource "google_compute_health_check" "https" { - provider = google-beta - count = ( - var.health_check == null && local.health_check_type == "https" ? 1 : 0 - ) - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - https_health_check { - host = try(var.health_check_config.check.host, null) - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request_path = try(var.health_check_config.check.request_path, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] + dynamic "subsetting" { + for_each = var.backend_service_config.enable_subsetting == true ? [""] : [] content { - enable = true + policy = "CONSISTENT_HASH_SUBSETTING" } } + } - -resource "google_compute_health_check" "tcp" { - provider = google-beta - count = ( - var.health_check == null && local.health_check_type == "tcp" ? 1 : 0 - ) - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - tcp_health_check { - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request = try(var.health_check_config.check.request, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? 
[""] : [] - content { - enable = true - } - } -} - -resource "google_compute_health_check" "ssl" { - provider = google-beta - count = ( - var.health_check == null && local.health_check_type == "ssl" ? 1 : 0 - ) - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - ssl_health_check { - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request = try(var.health_check_config.check.request, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} - -resource "google_compute_health_check" "http2" { - provider = google-beta - count = ( - var.health_check == null && local.health_check_type == "http2" ? 1 : 0 - ) - project = var.project_id - name = var.name - description = "Terraform managed." - - check_interval_sec = try(var.health_check_config.config.check_interval_sec, null) - healthy_threshold = try(var.health_check_config.config.healthy_threshold, null) - timeout_sec = try(var.health_check_config.config.timeout_sec, null) - unhealthy_threshold = try(var.health_check_config.config.unhealthy_threshold, null) - - http2_health_check { - host = try(var.health_check_config.check.host, null) - port = try(var.health_check_config.check.port, null) - port_name = try(var.health_check_config.check.port_name, null) - port_specification = try(var.health_check_config.check.port_specification, null) - proxy_header = try(var.health_check_config.check.proxy_header, null) - request_path = try(var.health_check_config.check.request_path, null) - response = try(var.health_check_config.check.response, null) - } - - dynamic "log_config" { - for_each = try(var.health_check_config.logging, false) ? [""] : [] - content { - enable = true - } - } -} - diff --git a/modules/net-ilb/outputs.tf b/modules/net-ilb/outputs.tf index 55b454e1..3f8eb9e4 100644 --- a/modules/net-ilb/outputs.tf +++ b/modules/net-ilb/outputs.tf @@ -14,17 +14,17 @@ * limitations under the License. */ -output "backend" { +output "backend_service" { description = "Backend resource." value = google_compute_region_backend_service.default } -output "backend_id" { +output "backend_service_id" { description = "Backend id." value = google_compute_region_backend_service.default.id } -output "backend_self_link" { +output "backend_service_self_link" { description = "Backend self link." value = google_compute_region_backend_service.default.self_link } @@ -49,6 +49,11 @@ output "forwarding_rule_self_link" { value = google_compute_forwarding_rule.default.self_link } +output "groups" { + description = "Optional unmanaged instance group resources." + value = google_compute_instance_group.unmanaged +} + output "group_self_links" { description = "Optional unmanaged instance group self links." value = { @@ -56,22 +61,17 @@ output "group_self_links" { } } -output "groups" { - description = "Optional unmanaged instance group resources." 
- value = google_compute_instance_group.unmanaged -} - output "health_check" { description = "Auto-created health-check resource." - value = local.health_check_resource + value = try(google_compute_health_check.default.0, null) } output "health_check_self_id" { description = "Auto-created health-check self id." - value = try(local.health_check_resource.id, null) + value = try(google_compute_health_check.default.0.id, null) } output "health_check_self_link" { description = "Auto-created health-check self link." - value = try(local.health_check_resource.self_link, null) + value = try(google_compute_health_check.default.0.self_link, null) } diff --git a/modules/net-ilb/variables.tf b/modules/net-ilb/variables.tf index 638aee52..d2ffc5a6 100644 --- a/modules/net-ilb/variables.tf +++ b/modules/net-ilb/variables.tf @@ -20,33 +20,62 @@ variable "address" { default = null } -variable "backend_config" { - description = "Optional backend configuration." +variable "backend_service_config" { + description = "Backend service level configuration." type = object({ - session_affinity = string - timeout_sec = number - connection_draining_timeout_sec = number + connection_draining_timeout_sec = optional(number) + connection_tracking = optional(object({ + idle_timeout_sec = optional(number) + persist_conn_on_unhealthy = optional(string) + track_per_session = optional(bool) + })) + enable_subsetting = optional(bool) + failover_config = optional(object({ + disable_conn_drain = optional(bool) + drop_traffic_if_unhealthy = optional(bool) + ratio = optional(number) + })) + log_sample_rate = optional(number) + session_affinity = optional(string) + timeout_sec = optional(number) }) - default = null + default = {} + nullable = false + validation { + condition = contains( + [ + "NONE", "CLIENT_IP", "CLIENT_IP_NO_DESTINATION", + "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO" + ], + coalesce(var.backend_service_config.session_affinity, "NONE") + ) + error_message = "Invalid session affinity value." + } } variable "backends" { description = "Load balancer backends, balancing mode is one of 'CONNECTION' or 'UTILIZATION'." type = list(object({ - failover = bool group = string - balancing_mode = string + balancing_mode = optional(string, "CONNECTION") + description = optional(string, "Terraform managed.") + failover = optional(bool, false) })) + default = [] + nullable = false + validation { + condition = alltrue([ + for b in var.backends : contains( + ["CONNECTION", "UTILIZATION"], coalesce(b.balancing_mode, "CONNECTION") + )]) + error_message = "When specified balancing mode needs to be 'CONNECTION' or 'UTILIZATION'." + } } -variable "failover_config" { - description = "Optional failover configuration." - type = object({ - disable_connection_drain = bool - drop_traffic_if_unhealthy = bool - ratio = number - }) - default = null +variable "description" { + description = "Optional description used for resources." + type = string + default = "Terraform managed." } variable "global_access" { @@ -58,11 +87,12 @@ variable "global_access" { variable "group_configs" { description = "Optional unmanaged groups to create. Can be referenced in backends via outputs." 
type = map(object({ - instances = list(string) - named_ports = map(number) zone = string + instances = optional(list(string), []) + named_ports = optional(map(number), {}) })) - default = {} + default = {} + nullable = false } variable "health_check" { @@ -72,20 +102,76 @@ variable "health_check" { } variable "health_check_config" { - description = "Configuration of the auto-created helth check." + description = "Optional auto-created health check configuration, use the output self-link to set it in the auto healing policy. Refer to examples for usage." type = object({ - type = string # http https tcp ssl http2 - check = map(any) # actual health check block attributes - config = map(number) # interval, thresholds, timeout - logging = bool + check_interval_sec = optional(number) + description = optional(string, "Terraform managed.") + enable_logging = optional(bool, false) + healthy_threshold = optional(number) + timeout_sec = optional(number) + unhealthy_threshold = optional(number) + grpc = optional(object({ + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + service_name = optional(string) + })) + http = optional(object({ + host = optional(string) + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request_path = optional(string) + response = optional(string) + })) + http2 = optional(object({ + host = optional(string) + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request_path = optional(string) + response = optional(string) + })) + https = optional(object({ + host = optional(string) + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request_path = optional(string) + response = optional(string) + })) + tcp = optional(object({ + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request = optional(string) + response = optional(string) + })) + ssl = optional(object({ + port = optional(number) + port_name = optional(string) + port_specification = optional(string) # USE_FIXED_PORT USE_NAMED_PORT USE_SERVING_PORT + proxy_header = optional(string) + request = optional(string) + response = optional(string) + })) }) default = { - type = "http" - check = { + tcp = { port_specification = "USE_SERVING_PORT" } - config = {} - logging = false + } + validation { + condition = ( + (try(var.health_check_config.grpc, null) == null ? 0 : 1) + + (try(var.health_check_config.http, null) == null ? 0 : 1) + + (try(var.health_check_config.tcp, null) == null ? 0 : 1) <= 1 + ) + error_message = "Only one health check type can be configured at a time." } } @@ -100,11 +186,6 @@ variable "name" { type = string } -variable "network" { - description = "Network used for resources." - type = string -} - variable "ports" { description = "Comma-separated ports, leave null to use all ports." type = list(string) @@ -133,7 +214,11 @@ variable "service_label" { default = null } -variable "subnetwork" { - description = "Subnetwork used for the forwarding rule." 
- type = string +variable "vpc_config" { + description = "VPC-level configuration." + type = object({ + network = string + subnetwork = string + }) + nullable = false } diff --git a/modules/net-ilb/versions.tf b/modules/net-ilb/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-ilb/versions.tf +++ b/modules/net-ilb/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-interconnect-attachment-direct/versions.tf b/modules/net-interconnect-attachment-direct/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-interconnect-attachment-direct/versions.tf +++ b/modules/net-interconnect-attachment-direct/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-vpc-firewall/versions.tf b/modules/net-vpc-firewall/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-vpc-firewall/versions.tf +++ b/modules/net-vpc-firewall/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-vpc-peering/versions.tf b/modules/net-vpc-peering/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-vpc-peering/versions.tf +++ b/modules/net-vpc-peering/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-vpc/README.md b/modules/net-vpc/README.md index 84377bd8..0d6a231e 100644 --- a/modules/net-vpc/README.md +++ b/modules/net-vpc/README.md @@ -276,8 +276,8 @@ flow_logs: # enable, set to empty map to use defaults | [subnet_iam](variables.tf#L133) | Subnet IAM bindings in {REGION/NAME => {ROLE => [MEMBERS]} format. | map(map(list(string))) | | {} | | [subnets](variables.tf#L139) | Subnet configuration. | list(object({…})) | | [] | | [subnets_proxy_only](variables.tf#L164) | List of proxy-only subnets for Regional HTTPS or Internal HTTPS load balancers. Note: Only one proxy-only subnet for each VPC network in each region can be active. | list(object({…})) | | [] | -| [subnets_psc](variables.tf#L176) | List of subnets for Private Service Connect service producers. | list(object({…})) | | [] | -| [vpc_create](variables.tf#L186) | Create VPC. When set to false, uses a data source to reference existing VPC. | bool | | true | +| [subnets_psc](variables.tf#L176) | List of subnets for Private Service Connect service producers. | list(object({…})) | | [] | +| [vpc_create](variables.tf#L187) | Create VPC. When set to false, uses a data source to reference existing VPC. 
| bool | | true | ## Outputs diff --git a/modules/net-vpc/subnets.tf b/modules/net-vpc/subnets.tf index 0496405b..ae094ecf 100644 --- a/modules/net-vpc/subnets.tf +++ b/modules/net-vpc/subnets.tf @@ -72,13 +72,17 @@ locals { } resource "google_compute_subnetwork" "subnetwork" { - for_each = local.subnets - project = var.project_id - network = local.network.name - name = each.value.name - region = each.value.region - ip_cidr_range = each.value.ip_cidr_range - description = try(each.value.description, "Terraform-managed.") + for_each = local.subnets + project = var.project_id + network = local.network.name + name = each.value.name + region = each.value.region + ip_cidr_range = each.value.ip_cidr_range + description = ( + each.value.description == null + ? "Terraform-managed." + : each.value.description + ) private_ip_google_access = each.value.enable_private_access secondary_ip_range = each.value.secondary_ip_ranges == null ? [] : [ for name, range in each.value.secondary_ip_ranges : @@ -107,9 +111,10 @@ resource "google_compute_subnetwork" "proxy_only" { name = each.value.name region = each.value.region ip_cidr_range = each.value.ip_cidr_range - description = try( - each.value.description, - "Terraform-managed proxy-only subnet for Regional HTTPS or Internal HTTPS LB." + description = ( + each.value.description == null + ? "Terraform-managed proxy-only subnet for Regional HTTPS or Internal HTTPS LB." + : each.value.description ) purpose = "REGIONAL_MANAGED_PROXY" role = ( @@ -124,9 +129,10 @@ resource "google_compute_subnetwork" "psc" { name = each.value.name region = each.value.region ip_cidr_range = each.value.ip_cidr_range - description = try( - each.value.description, - "Terraform-managed subnet for Private Service Connect (PSC NAT)." + description = ( + each.value.description == null + ? "Terraform-managed subnet for Private Service Connect (PSC NAT)." 
+ : each.value.description ) purpose = "PRIVATE_SERVICE_CONNECT" } diff --git a/modules/net-vpc/variables.tf b/modules/net-vpc/variables.tf index 89207479..a7aa2077 100644 --- a/modules/net-vpc/variables.tf +++ b/modules/net-vpc/variables.tf @@ -179,6 +179,7 @@ variable "subnets_psc" { name = string ip_cidr_range = string region = string + description = optional(string) })) default = [] } diff --git a/modules/net-vpc/versions.tf b/modules/net-vpc/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-vpc/versions.tf +++ b/modules/net-vpc/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-vpn-dynamic/versions.tf b/modules/net-vpn-dynamic/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-vpn-dynamic/versions.tf +++ b/modules/net-vpn-dynamic/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-vpn-ha/versions.tf b/modules/net-vpn-ha/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-vpn-ha/versions.tf +++ b/modules/net-vpn-ha/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/net-vpn-static/versions.tf b/modules/net-vpn-static/versions.tf index b1c8c910..286536a6 100644 --- a/modules/net-vpn-static/versions.tf +++ b/modules/net-vpn-static/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/organization-policy/README.md b/modules/organization-policy/README.md deleted file mode 100644 index 3b914170..00000000 --- a/modules/organization-policy/README.md +++ /dev/null @@ -1,166 +0,0 @@ -# Google Cloud Organization Policy - -This module allows creation and management of [GCP Organization Policies](https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints) by defining them in a well formatted `yaml` files or with HCL. - -Yaml based factory can simplify centralized management of Org Policies for a DevSecOps team by providing a simple way to define/structure policies and exclusions. - -> **_NOTE:_** This module uses experimental feature `module_variable_optional_attrs` which will be included into [terraform release 1.3](https://github.com/hashicorp/terraform/releases/tag/v1.3.0-alpha20220706). 
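The organization-policy module removed below is superseded by the `org_policies` variable exposed directly by the resource management modules (see the folder module variables earlier in this diff). As a minimal, illustrative sketch only, assuming that variable's schema and with placeholder parent id, folder name and constraint values, the same policies can now be declared inline:

```hcl
module "folder" {
  source = "./fabric/modules/folder"
  parent = "organizations/1234567890"
  name   = "prod"
  org_policies = {
    # boolean constraint, enforced unconditionally
    "iam.disableServiceAccountKeyUpload" = {
      enforce = true
    }
    # list constraint allowing specific values
    "compute.trustedImageProjects" = {
      allow = { values = ["projects/my-project"] }
    }
  }
}
# tftest skip
```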
- -## Example - -### Terraform code - -```hcl -# using configuration provided in a set of yaml files -module "org-policy-factory" { - source = "./fabric/modules/organization-policy" - - config_directory = "./policies" -} - -# using configuration provided in the module variable -module "org-policy" { - source = "./fabric/modules/organization-policy" - - policies = { - "folders/1234567890" = { - # enforce boolean policy with no conditions - "iam.disableServiceAccountKeyUpload" = { - rules = [ - { - enforce = true - } - ] - }, - # Deny All for compute.vmCanIpForward policy - "compute.vmCanIpForward" = { - inherit_from_parent = false - rules = [ - deny = [] # stands for deny_all - ] - } - }, - "organizations/1234567890" = { - # allow only internal ingress when match condition env=prod - "run.allowedIngress" = { - rules = [ - { - allow = ["internal"] - condition = { - description= "allow ingress" - expression = "resource.matchTag('123456789/environment', 'prod')" - title = "allow-for-prod-org" - } - } - ] - } - } - } -} -# tftest skip -``` - -## Org Policy definition format and structure - -### Structure of `policies` variable - -```hcl -policies = { - "parent_id" = { # parent id in format projects/project-id, folders/1234567890 or organizations/1234567890. - "policy_name" = { # policy constraint id, for example compute.vmExternalIpAccess. - inherit_from_parent = true|false # (Optional) Only for list constraints. Determines the inheritance behavior for this policy. - reset = true|false # (Optional) Ignores policies set above this resource and restores the constraint_default enforcement behavior. - rules = [ # Up to 10 PolicyRules are allowed. - { - allow = ["value1", "value2"] # (Optional) Only for list constraints. Stands for `allow_all` if set to empty list `[]` or to `values.allowed_values` if set to a list of values - denyl = ["value3", "value4"] # (Optional) Only for list constraints. Stands for `deny_all` if set to empty list `[]` or to `values.denied_values` if set to a list of values - enforce = true|false # (Optional) Only for boolean constraints. If true, then the Policy is enforced. - condition = { # (Optional) A condition which determines whether this rule is used in the evaluation of the policy. - description = "Condition description" # (Optional) - expression = "Condition expression" # (Optional) For example "resource.matchTag('123456789/environment', 'prod')". - location = "policy-error.log" # (Optional) String indicating the location of the expression for error reporting. - title = "condition-title" # (Optional) - } - } - ] - } - } -} -# tftest skip -``` - -### Structure of configuration provided in a yaml file/s - -Configuration should be placed in a set of yaml files in the config directory. Policy entry structure as follows: - -```yaml -parent_id: # parent id in format projects/project-id, folders/1234567890 or organizations/1234567890. - policy_name1: # policy constraint id, for example compute.vmExternalIpAccess. - inherit_from_parent: true|false # (Optional) Only for list constraints. Determines the inheritance behavior for this policy. - reset: true|false # (Optional) Ignores policies set above this resource and restores the constraint_default enforcement behavior. - rules: - - allow: ["value1", "value2"] # (Optional) Only for list constraints. Stands for `allow_all` if set to empty list `[]` or to `values.allowed_values` if set to a list of values - deny: ["value3", "value4"] # (Optional) Only for list constraints. 
Stands for `deny_all` if set to empty list `[]` or to `values.denied_values` if set to a list of values - enforce: true|false # (Optional) Only for boolean constraints. If true, then the Policy is enforced. - condition: # (Optional) A condition which determines whether this rule is used in the evaluation of the policy. - description: Condition description # (Optional) - expression: Condition expression # (Optional) For example resource.matchTag("123456789/environment", "prod") - location: policy-error.log # (Optional) String indicating the location of the expression for error reporting. - title: condition-title # (Optional) -``` - -Module allows policies to be distributed into multiple yaml files for a better management and navigation. - -```bash -├── org-policies -│ ├── baseline.yaml -│   ├── image-import-projects.yaml -│   └── exclusions.yaml -``` - -Organization policies example yaml configuration - -```bash -cat ./policies/baseline.yaml -organizations/1234567890: - constraints/compute.vmExternalIpAccess: - rules: - - deny: [] # Stands for deny_all = true -folders/1234567890: - compute.vmCanIpForward: - inherit_from_parent: false - reset: false - rules: - - allow: [] # Stands for allow_all = true -projects/my-project-id: - run.allowedIngress: - inherit_from_parent: true - rules: - - allow: ['internal'] # Stands for values.allowed_values - condition: - description: allow internal ingress - expression: resource.matchTag("123456789/environment", "prod") - location: test.log - title: allow-for-prod - iam.allowServiceAccountCredentialLifetimeExtension: - rules: - - deny: [] # Stands for deny_all = true - compute.disableGlobalLoadBalancing: - reset: true -``` - - -## Variables - -| name | description | type | required | default | -|---|---|:---:|:---:|:---:| -| [config_directory](variables.tf#L17) | Paths to a folder where organization policy configs are stored in yaml format. Files suffix must be `.yaml`. | string | | null | -| [policies](variables.tf#L23) | Organization policies keyed by parent in format `projects/project-id`, `folders/1234567890` or `organizations/1234567890`. | map(map(object({…}))) | | {} | - -## Outputs - -| name | description | sensitive | -|---|---|:---:| -| [policies](outputs.tf#L17) | Organization policies. | | - - diff --git a/modules/organization-policy/main.tf b/modules/organization-policy/main.tf deleted file mode 100644 index 960a8462..00000000 --- a/modules/organization-policy/main.tf +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -locals { - policy_files = var.config_directory == null ? [] : concat( - [ - for config_file in fileset("${path.root}/${var.config_directory}", "**/*.yaml") : - "${path.root}/${var.config_directory}/${config_file}" - ] - ) - - policies_raw = merge( - merge( - [ - for config_file in local.policy_files : - try(yamldecode(file(config_file)), {}) - ]... 
- ), var.policies) - - policies_list = flatten([ - for parent, policies in local.policies_raw : [ - for policy_name, policy in policies : { - parent = parent, - policy_name = policy_name, - inherit_from_parent = try(policy["inherit_from_parent"], null), - reset = try(policy["reset"], null), - rules = [ - for rule in try(policy["rules"], []) : { - allow_all = try(length(rule["allow"]), -1) == 0 ? "TRUE" : null - deny_all = try(length(rule["deny"]), -1) == 0 ? "TRUE" : null - enforce = try(rule["enforce"], null) == true ? "TRUE" : try( - rule["enforce"], null) == false ? "FALSE" : null, - condition = try(rule["condition"], null) != null ? { - description = try(rule["condition"]["description"], null), - expression = try(rule["condition"]["expression"], null), - location = try(rule["condition"]["location"], null), - title = try(rule["condition"]["title"], null) - } : null, - values = try(length(rule["allow"]), 0) > 0 || try(length(rule["deny"]), 0) > 0 ? { - allowed_values = try(length(rule["allow"]), 0) > 0 ? rule["allow"] : null - denied_values = try(length(rule["deny"]), 0) > 0 ? rule["deny"] : null - } : null - } - ] - } - ] - ]) - - policies_map = { - for item in local.policies_list : - format("%s-%s", item["parent"], item["policy_name"]) => item - } -} - -resource "google_org_policy_policy" "primary" { - for_each = local.policies_map - name = format("%s/policies/%s", each.value.parent, each.value.policy_name) - parent = each.value.parent - - spec { - inherit_from_parent = each.value.inherit_from_parent - reset = each.value.reset - dynamic "rules" { - for_each = each.value.rules - content { - allow_all = rules.value.allow_all - deny_all = rules.value.deny_all - enforce = rules.value.enforce - dynamic "condition" { - for_each = rules.value.condition != null ? [""] : [] - content { - description = rules.value.condition.description - expression = rules.value.condition.expression - location = rules.value.condition.location - title = rules.value.condition.title - } - } - dynamic "values" { - for_each = rules.value.values != null ? [""] : [] - content { - allowed_values = rules.value.values.allowed_values - denied_values = rules.value.values.denied_values - } - } - } - } - } -} diff --git a/modules/organization-policy/variables.tf b/modules/organization-policy/variables.tf deleted file mode 100644 index ff842dd9..00000000 --- a/modules/organization-policy/variables.tf +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -variable "config_directory" { - description = "Paths to a folder where organization policy configs are stored in yaml format. Files suffix must be `.yaml`." - type = string - default = null -} - -variable "policies" { - description = "Organization policies keyed by parent in format `projects/project-id`, `folders/1234567890` or `organizations/1234567890`." - type = map(map(object({ - inherit_from_parent = optional(bool) # List policy only. 
- reset = optional(bool) - rules = optional( - list(object({ - allow = optional(list(string)) # List policy only. Stands for `allow_all` if set to empty list `[]` or to `values.allowed_values` if set to a list of values - deny = optional(list(string)) # List policy only. Stands for `deny_all` if set to empty list `[]` or to `values.denied_values` if set to a list of values - enforce = optional(bool) # Boolean policy only. - condition = optional( - object({ - description = optional(string) - expression = optional(string) - location = optional(string) - title = optional(string) - }) - ) - })) - ) - }))) - default = {} -} diff --git a/modules/organization/README.md b/modules/organization/README.md index 2377c6cc..84a7da84 100644 --- a/modules/organization/README.md +++ b/modules/organization/README.md @@ -7,6 +7,8 @@ This module allows managing several organization properties: - audit logging configuration for services - organization policies +To manage organization policies, the `orgpolicy.googleapis.com` service should be enabled in the quota project. + ## Example ```hcl @@ -19,20 +21,47 @@ module "org" { iam = { "roles/resourcemanager.projectCreator" = ["group:cloud-admins@example.org"] } - policy_boolean = { - "constraints/compute.disableGuestAttributesAccess" = true - "constraints/compute.skipDefaultNetworkCreation" = true - } - policy_list = { + + org_policies = { + "compute.disableGuestAttributesAccess" = { + enforce = true + } + "constraints/compute.skipDefaultNetworkCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyUpload" = { + enforce = false + rules = [ + { + condition = { + expression = "resource.matchTagId(\"tagKeys/1234\", \"tagValues/1234\")" + title = "condition" + description = "test condition" + location = "somewhere" + } + enforce = true + } + ] + } + "constraints/iam.allowedPolicyMemberDomains" = { + allow = { + values = ["C0xxxxxxx", "C0yyyyyyy"] + } + } "constraints/compute.trustedImageProjects" = { - inherit_from_parent = null - suggested_value = null - status = true - values = ["projects/my-project"] + allow = { + values = ["projects/my-project"] + } + } + "constraints/compute.vmExternalIpAccess" = { + deny = { all = true } } } } -# tftest modules=1 resources=6 +# tftest modules=1 resources=10 ``` ## IAM @@ -47,6 +76,10 @@ If you set audit policies via the `iam_audit_config_authoritative` variable, be Some care must also be takend with the `groups_iam` variable (and in some situations with the additive variables) to ensure that variable keys are static values, so that Terraform is able to compute the dependency graph. +### Organization policy factory + +See the [organization policy factory in the project module](../project#organization-policy-factory). + ## Hierarchical firewall policies Hirerarchical firewall policies can be managed in two ways: @@ -281,7 +314,7 @@ module "org" { | [iam.tf](./iam.tf) | IAM bindings, roles and audit logging resources. | google_organization_iam_audit_config · google_organization_iam_binding · google_organization_iam_custom_role · google_organization_iam_member · google_organization_iam_policy | | [logging.tf](./logging.tf) | Log sinks and supporting resources. | google_bigquery_dataset_iam_member · google_logging_organization_exclusion · google_logging_organization_sink · google_project_iam_member · google_pubsub_topic_iam_member · google_storage_bucket_iam_member | | [main.tf](./main.tf) | Module-level locals and resources. 
| google_essential_contacts_contact | -| [organization-policies.tf](./organization-policies.tf) | Organization-level organization policies. | google_organization_policy | +| [organization-policies.tf](./organization-policies.tf) | Organization-level organization policies. | google_org_policy_policy | | [outputs.tf](./outputs.tf) | Module outputs. | | | [tags.tf](./tags.tf) | None | google_tags_tag_binding · google_tags_tag_key · google_tags_tag_key_iam_binding · google_tags_tag_value · google_tags_tag_value_iam_binding | | [variables.tf](./variables.tf) | Module variables. | | @@ -291,7 +324,7 @@ module "org" { | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [organization_id](variables.tf#L151) | Organization id in organizations/nnnnnn format. | string | ✓ | | +| [organization_id](variables.tf#L191) | Organization id in organizations/nnnnnn format. | string | ✓ | | | [contacts](variables.tf#L17) | List of essential contacts for this resource. Must be in the form EMAIL -> [NOTIFICATION_TYPES]. Valid notification types are ALL, SUSPENSION, SECURITY, TECHNICAL, BILLING, LEGAL, PRODUCT_UPDATES. | map(list(string)) | | {} | | [custom_roles](variables.tf#L24) | Map of role name => list of permissions to create in this project. | map(list(string)) | | {} | | [firewall_policies](variables.tf#L31) | Hierarchical firewall policy rules created in the organization. | map(map(object({…}))) | | {} | @@ -306,10 +339,10 @@ module "org" { | [iam_bindings_authoritative](variables.tf#L116) | IAM authoritative bindings, in {ROLE => [MEMBERS]} format. Roles and members not explicitly listed will be cleared. Bindings should also be authoritative when using authoritative audit config. Use with caution. | map(list(string)) | | null | | [logging_exclusions](variables.tf#L122) | Logging exclusions for this organization in the form {NAME -> FILTER}. | map(string) | | {} | | [logging_sinks](variables.tf#L129) | Logging sinks to create for this organization. | map(object({…})) | | {} | -| [policy_boolean](variables.tf#L160) | Map of boolean org policies and enforcement value, set value to null for policy restore. | map(bool) | | {} | -| [policy_list](variables.tf#L167) | Map of list org policies, status is true for allow, false for deny, null for restore. Values can only be used for allow or deny. | map(object({…})) | | {} | -| [tag_bindings](variables.tf#L179) | Tag bindings for this organization, in key => tag value id format. | map(string) | | null | -| [tags](variables.tf#L185) | Tags by key name. The `iam` attribute behaves like the similarly named one at module level. | map(object({…})) | | null | +| [org_policies](variables.tf#L151) | Organization policies applied to this organization keyed by policy name. | map(object({…})) | | {} | +| [org_policies_data_path](variables.tf#L200) | Path containing org policies in YAML format. | string | | null | +| [tag_bindings](variables.tf#L206) | Tag bindings for this organization, in key => tag value id format. | map(string) | | null | +| [tags](variables.tf#L212) | Tags by key name. The `iam` attribute behaves like the similarly named one at module level. | map(object({…})) | | null | ## Outputs @@ -320,8 +353,8 @@ module "org" { | [firewall_policies](outputs.tf#L36) | Map of firewall policy resources created in the organization. | | | [firewall_policy_id](outputs.tf#L41) | Map of firewall policy ids created in the organization. | | | [organization_id](outputs.tf#L46) | Organization id dependent on module resources. 
| | -| [sink_writer_identities](outputs.tf#L64) | Writer identities created for each sink. | | -| [tag_keys](outputs.tf#L72) | Tag key resources. | | -| [tag_values](outputs.tf#L79) | Tag value resources. | | +| [sink_writer_identities](outputs.tf#L63) | Writer identities created for each sink. | | +| [tag_keys](outputs.tf#L71) | Tag key resources. | | +| [tag_values](outputs.tf#L78) | Tag value resources. | | diff --git a/modules/organization/organization-policies.tf b/modules/organization/organization-policies.tf index f23a98b4..fcde1658 100644 --- a/modules/organization/organization-policies.tf +++ b/modules/organization/organization-policies.tf @@ -16,83 +16,131 @@ # tfdoc:file:description Organization-level organization policies. -resource "google_organization_policy" "boolean" { - for_each = var.policy_boolean - org_id = local.organization_id_numeric - constraint = each.key +locals { + _factory_data_raw = ( + var.org_policies_data_path == null + ? tomap({}) + : merge([ + for f in fileset(var.org_policies_data_path, "*.yaml") : + yamldecode(file("${var.org_policies_data_path}/${f}")) + ]...) + ) - dynamic "boolean_policy" { - for_each = each.value == null ? [] : [each.value] - iterator = policy - content { - enforced = policy.value + # simulate applying defaults to data coming from yaml files + _factory_data = { + for k, v in local._factory_data_raw : + k => { + inherit_from_parent = try(v.inherit_from_parent, null) + reset = try(v.reset, null) + allow = can(v.allow) ? { + all = try(v.allow.all, null) + values = try(v.allow.values, null) + } : null + deny = can(v.deny) ? { + all = try(v.deny.all, null) + values = try(v.deny.values, null) + } : null + enforce = try(v.enforce, true) + + rules = [ + for r in try(v.rules, []) : { + allow = can(r.allow) ? { + all = try(r.allow.all, null) + values = try(r.allow.values, null) + } : null + deny = can(r.deny) ? { + all = try(r.deny.all, null) + values = try(r.deny.values, null) + } : null + enforce = try(r.enforce, true) + condition = { + description = try(r.condition.description, null) + expression = try(r.condition.expression, null) + location = try(r.condition.location, null) + title = try(r.condition.title, null) + } + } + ] } } - dynamic "restore_policy" { - for_each = each.value == null ? [""] : [] - content { - default = true - } - } - - depends_on = [ - google_organization_iam_audit_config.config, - google_organization_iam_binding.authoritative, - google_organization_iam_custom_role.roles, - google_organization_iam_member.additive, - google_organization_iam_policy.authoritative, - ] -} - -resource "google_organization_policy" "list" { - for_each = var.policy_list - org_id = local.organization_id_numeric - constraint = each.key - - dynamic "list_policy" { - for_each = each.value.status == null ? [] : [each.value] - iterator = policy - content { - inherit_from_parent = policy.value.inherit_from_parent - suggested_value = policy.value.suggested_value - dynamic "allow" { - for_each = policy.value.status ? [""] : [] - content { - values = ( - try(length(policy.value.values) > 0, false) - ? policy.value.values - : null - ) - all = ( - try(length(policy.value.values) > 0, false) - ? null - : true - ) - } - } - dynamic "deny" { - for_each = policy.value.status ? [] : [""] - content { - values = ( - try(length(policy.value.values) > 0, false) - ? policy.value.values - : null - ) - all = ( - try(length(policy.value.values) > 0, false) - ? null - : true - ) - } - } - } - } - - dynamic "restore_policy" { - for_each = each.value.status == null ? 
[true] : [] - content { - default = true + _org_policies = merge(local._factory_data, var.org_policies) + + org_policies = { + for k, v in local._org_policies : + k => merge(v, { + name = "${var.organization_id}/policies/${k}" + parent = var.organization_id + + is_boolean_policy = v.allow == null && v.deny == null + has_values = ( + length(coalesce(try(v.allow.values, []), [])) > 0 || + length(coalesce(try(v.deny.values, []), [])) > 0 + ) + rules = [ + for r in v.rules : + merge(r, { + has_values = ( + length(coalesce(try(r.allow.values, []), [])) > 0 || + length(coalesce(try(r.deny.values, []), [])) > 0 + ) + }) + ] + }) + } +} + +resource "google_org_policy_policy" "default" { + for_each = local.org_policies + name = each.value.name + parent = each.value.parent + + spec { + inherit_from_parent = each.value.inherit_from_parent + reset = each.value.reset + + rules { + allow_all = try(each.value.allow.all, null) == true ? "TRUE" : null + deny_all = try(each.value.deny.all, null) == true ? "TRUE" : null + enforce = ( + each.value.is_boolean_policy && each.value.enforce != null + ? upper(tostring(each.value.enforce)) + : null + ) + dynamic "values" { + for_each = each.value.has_values ? [1] : [] + content { + allowed_values = try(each.value.allow.values, null) + denied_values = try(each.value.deny.values, null) + } + } + } + + dynamic "rules" { + for_each = each.value.rules + iterator = rule + content { + allow_all = try(rule.value.allow.all, false) == true ? "TRUE" : null + deny_all = try(rule.value.deny.all, false) == true ? "TRUE" : null + enforce = ( + each.value.is_boolean_policy && rule.value.enforce != null + ? upper(tostring(rule.value.enforce)) + : null + ) + condition { + description = rule.value.condition.description + expression = rule.value.condition.expression + location = rule.value.condition.location + title = rule.value.condition.title + } + dynamic "values" { + for_each = rule.value.has_values ? [1] : [] + content { + allowed_values = try(rule.value.allow.values, null) + denied_values = try(rule.value.deny.values, null) + } + } + } } } diff --git a/modules/organization/outputs.tf b/modules/organization/outputs.tf index 1679a1d7..198b3c8d 100644 --- a/modules/organization/outputs.tf +++ b/modules/organization/outputs.tf @@ -52,8 +52,7 @@ output "organization_id" { google_organization_iam_custom_role.roles, google_organization_iam_member.additive, google_organization_iam_policy.authoritative, - google_organization_policy.boolean, - google_organization_policy.list, + google_org_policy_policy.default, google_tags_tag_key.default, google_tags_tag_key_iam_binding.default, google_tags_tag_value.default, diff --git a/modules/organization/variables.tf b/modules/organization/variables.tf index 9ffce95c..7093a118 100644 --- a/modules/organization/variables.tf +++ b/modules/organization/variables.tf @@ -148,6 +148,46 @@ variable "logging_sinks" { nullable = false } +variable "org_policies" { + description = "Organization policies applied to this organization keyed by policy name." + type = map(object({ + inherit_from_parent = optional(bool) # for list policies only. + reset = optional(bool) + + # default (unconditional) values + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. 
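    # Note: `allow`/`deny` apply to list constraints and `enforce` to boolean
    # constraints; entries defined in this variable take precedence over the
    # same constraint loaded via `org_policies_data_path`.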
+ + # conditional values + rules = optional(list(object({ + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + condition = object({ + description = optional(string) + expression = optional(string) + location = optional(string) + title = optional(string) + }) + })), []) + })) + default = {} + nullable = false +} + variable "organization_id" { description = "Organization id in organizations/nnnnnn format." type = string @@ -157,23 +197,10 @@ variable "organization_id" { } } -variable "policy_boolean" { - description = "Map of boolean org policies and enforcement value, set value to null for policy restore." - type = map(bool) - default = {} - nullable = false -} - -variable "policy_list" { - description = "Map of list org policies, status is true for allow, false for deny, null for restore. Values can only be used for allow or deny." - type = map(object({ - inherit_from_parent = bool - suggested_value = string - status = bool - values = list(string) - })) - default = {} - nullable = false +variable "org_policies_data_path" { + description = "Path containing org policies in YAML format." + type = string + default = null } variable "tag_bindings" { diff --git a/modules/organization/versions.tf b/modules/organization/versions.tf index b1c8c910..286536a6 100644 --- a/modules/organization/versions.tf +++ b/modules/organization/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/project/README.md b/modules/project/README.md index 9df30d18..37af720c 100644 --- a/modules/project/README.md +++ b/modules/project/README.md @@ -156,6 +156,8 @@ module "project" { ## Organization policies +To manage organization policies, the `orgpolicy.googleapis.com` service should be enabled in the quota project. 
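One possible way to satisfy this (a sketch only, not part of the module; `my-quota-project` stands for any existing project that already has the Org Policy API enabled) is to route requests through that project via the provider configuration:

```hcl
provider "google" {
  # assumed placeholder: a project where orgpolicy.googleapis.com is enabled
  billing_project       = "my-quota-project"
  user_project_override = true
}
# tftest skip
```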
+ ```hcl module "project" { source = "./fabric/modules/project" @@ -167,22 +169,113 @@ module "project" { "container.googleapis.com", "stackdriver.googleapis.com" ] - policy_boolean = { - "constraints/compute.disableGuestAttributesAccess" = true - "constraints/compute.skipDefaultNetworkCreation" = true - } - policy_list = { + org_policies = { + "compute.disableGuestAttributesAccess" = { + enforce = true + } + "constraints/compute.skipDefaultNetworkCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyUpload" = { + enforce = false + rules = [ + { + condition = { + expression = "resource.matchTagId(\"tagKeys/1234\", \"tagValues/1234\")" + title = "condition" + description = "test condition" + location = "somewhere" + } + enforce = true + } + ] + } + "constraints/iam.allowedPolicyMemberDomains" = { + allow = { + values = ["C0xxxxxxx", "C0yyyyyyy"] + } + } "constraints/compute.trustedImageProjects" = { - inherit_from_parent = null - suggested_value = null - status = true - values = ["projects/my-project"] + allow = { + values = ["projects/my-project"] + } + } + "constraints/compute.vmExternalIpAccess" = { + deny = { all = true } } } } -# tftest modules=1 resources=6 +# tftest modules=1 resources=10 ``` +### Organization policy factory + +Organization policies can be loaded from a directory containing YAML files where each file defines one or more constraints. The structure of the YAML files is exactly the same as the `org_policies` variable. + +Note that contraints defined via `org_policies` take precedence over those in `org_policies_data_path`. In other words, if you specify the same contraint in a YAML file *and* in the `org_policies` variable, the latter will take priority. + +The example below deploys a few organization policies split between two YAML files. + +```hcl +module "folder" { + source = "./fabric/modules/folder" + parent = "organizations/1234567890" + name = "Folder name" + org_policies_data_path = "/my/path" +} +# tftest skip +``` + +```yaml +# /my/path/boolean.yaml +iam.disableServiceAccountKeyCreation: + enforce: true + +iam.disableServiceAccountKeyUpload: + enforce: false + rules: + - condition: + expression: resource.matchTagId("tagKeys/1234", "tagValues/1234") + title: condition + description: test condition + location: xxx + enforce: true +``` + +```yaml +# /my/path/list.yaml +compute.vmExternalIpAccess: + deny: + all: true + +iam.allowedPolicyMemberDomains: + allow: + values: + - C0xxxxxxx + - C0yyyyyyy + +compute.restrictLoadBalancerCreationForTypes: + deny: + values: ["in:EXTERNAL"] + rules: + - condition: + expression: resource.matchTagId("tagKeys/1234", "tagValues/1234") + title: condition + description: test condition + allow: + values: ["in:EXTERNAL"] + - condition: + expression: resource.matchTagId("tagKeys/12345", "tagValues/12345") + title: condition2 + description: test condition2 + allow: + all: true +``` + + ## Logging Sinks ```hcl @@ -349,7 +442,7 @@ output "compute_robot" { | [iam.tf](./iam.tf) | Generic and OSLogin-specific IAM bindings and roles. | google_project_iam_binding · google_project_iam_custom_role · google_project_iam_member | | [logging.tf](./logging.tf) | Log sinks and supporting resources. | google_bigquery_dataset_iam_member · google_logging_project_exclusion · google_logging_project_sink · google_project_iam_member · google_pubsub_topic_iam_member · google_storage_bucket_iam_member | | [main.tf](./main.tf) | Module-level locals and resources. 
| google_compute_project_metadata_item · google_essential_contacts_contact · google_monitoring_monitored_project · google_project · google_project_service · google_resource_manager_lien | -| [organization-policies.tf](./organization-policies.tf) | Project-level organization policies. | google_project_organization_policy | +| [organization-policies.tf](./organization-policies.tf) | Project-level organization policies. | google_org_policy_policy | | [outputs.tf](./outputs.tf) | Module outputs. | | | [service-accounts.tf](./service-accounts.tf) | Service identities and supporting resources. | google_kms_crypto_key_iam_member · google_project_default_service_accounts · google_project_iam_member · google_project_service_identity | | [shared-vpc.tf](./shared-vpc.tf) | Shared VPC project-level configuration. | google_compute_shared_vpc_host_project · google_compute_shared_vpc_service_project · google_project_iam_member | @@ -367,8 +460,8 @@ output "compute_robot" { | [billing_account](variables.tf#L23) | Billing account id. | string | | null | | [contacts](variables.tf#L29) | List of essential contacts for this resource. Must be in the form EMAIL -> [NOTIFICATION_TYPES]. Valid notification types are ALL, SUSPENSION, SECURITY, TECHNICAL, BILLING, LEGAL, PRODUCT_UPDATES. | map(list(string)) | | {} | | [custom_roles](variables.tf#L36) | Map of role name => list of permissions to create in this project. | map(list(string)) | | {} | -| [default_service_account](variables.tf#L49) | Project default service account setting: can be one of `delete`, `deprivilege`, `disable`, or `keep`. | string | | "keep" | -| [descriptive_name](variables.tf#L43) | Name of the project name. Used for project name instead of `name` variable. | string | | null | +| [default_service_account](variables.tf#L43) | Project default service account setting: can be one of `delete`, `deprivilege`, `disable`, or `keep`. | string | | "keep" | +| [descriptive_name](variables.tf#L49) | Name of the project name. Used for project name instead of `name` variable. | string | | null | | [group_iam](variables.tf#L55) | Authoritative IAM binding for organization groups, in {GROUP_EMAIL => [ROLES]} format. Group emails need to be static. Can be used in combination with the `iam` variable. | map(list(string)) | | {} | | [iam](variables.tf#L62) | IAM bindings in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | [iam_additive](variables.tf#L69) | IAM additive bindings in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | @@ -378,23 +471,23 @@ output "compute_robot" { | [logging_exclusions](variables.tf#L95) | Logging exclusions for this project in the form {NAME -> FILTER}. | map(string) | | {} | | [logging_sinks](variables.tf#L102) | Logging sinks to create for this project. | map(object({…})) | | {} | | [metric_scopes](variables.tf#L124) | List of projects that will act as metric scopes for this project. | list(string) | | [] | -| [oslogin](variables.tf#L136) | Enable OS Login. | bool | | false | -| [oslogin_admins](variables.tf#L142) | List of IAM-style identities that will be granted roles necessary for OS Login administrators. | list(string) | | [] | -| [oslogin_users](variables.tf#L150) | List of IAM-style identities that will be granted roles necessary for OS Login users. | list(string) | | [] | -| [parent](variables.tf#L157) | Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format. 
| string | | null | -| [policy_boolean](variables.tf#L167) | Map of boolean org policies and enforcement value, set value to null for policy restore. | map(bool) | | {} | -| [policy_list](variables.tf#L174) | Map of list org policies, status is true for allow, false for deny, null for restore. Values can only be used for allow or deny. | map(object({…})) | | {} | -| [prefix](variables.tf#L186) | Prefix used to generate project id and name. | string | | null | -| [project_create](variables.tf#L192) | Create project. When set to false, uses a data source to reference existing project. | bool | | true | -| [service_config](variables.tf#L198) | Configure service API activation. | object({…}) | | {…} | -| [service_encryption_key_ids](variables.tf#L210) | Cloud KMS encryption key in {SERVICE => [KEY_URL]} format. | map(list(string)) | | {} | -| [service_perimeter_bridges](variables.tf#L217) | Name of VPC-SC Bridge perimeters to add project into. See comment in the variables file for format. | list(string) | | null | -| [service_perimeter_standard](variables.tf#L224) | Name of VPC-SC Standard perimeter to add project into. See comment in the variables file for format. | string | | null | -| [services](variables.tf#L230) | Service APIs to enable. | list(string) | | [] | -| [shared_vpc_host_config](variables.tf#L236) | Configures this project as a Shared VPC host project (mutually exclusive with shared_vpc_service_project). | object({…}) | | null | -| [shared_vpc_service_config](variables.tf#L245) | Configures this project as a Shared VPC service project (mutually exclusive with shared_vpc_host_config). | object({…}) | | null | -| [skip_delete](variables.tf#L255) | Allows the underlying resources to be destroyed without destroying the project itself. | bool | | false | -| [tag_bindings](variables.tf#L261) | Tag bindings for this project, in key => tag value id format. | map(string) | | null | +| [org_policies](variables.tf#L136) | Organization policies applied to this project keyed by policy name. | map(object({…})) | | {} | +| [org_policies_data_path](variables.tf#L176) | Path containing org policies in YAML format. | string | | null | +| [oslogin](variables.tf#L182) | Enable OS Login. | bool | | false | +| [oslogin_admins](variables.tf#L188) | List of IAM-style identities that will be granted roles necessary for OS Login administrators. | list(string) | | [] | +| [oslogin_users](variables.tf#L196) | List of IAM-style identities that will be granted roles necessary for OS Login users. | list(string) | | [] | +| [parent](variables.tf#L203) | Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format. | string | | null | +| [prefix](variables.tf#L213) | Prefix used to generate project id and name. | string | | null | +| [project_create](variables.tf#L219) | Create project. When set to false, uses a data source to reference existing project. | bool | | true | +| [service_config](variables.tf#L225) | Configure service API activation. | object({…}) | | {…} | +| [service_encryption_key_ids](variables.tf#L237) | Cloud KMS encryption key in {SERVICE => [KEY_URL]} format. | map(list(string)) | | {} | +| [service_perimeter_bridges](variables.tf#L244) | Name of VPC-SC Bridge perimeters to add project into. See comment in the variables file for format. | list(string) | | null | +| [service_perimeter_standard](variables.tf#L251) | Name of VPC-SC Standard perimeter to add project into. See comment in the variables file for format. 
| string | | null | +| [services](variables.tf#L257) | Service APIs to enable. | list(string) | | [] | +| [shared_vpc_host_config](variables.tf#L263) | Configures this project as a Shared VPC host project (mutually exclusive with shared_vpc_service_project). | object({…}) | | null | +| [shared_vpc_service_config](variables.tf#L272) | Configures this project as a Shared VPC service project (mutually exclusive with shared_vpc_host_config). | object({…}) | | null | +| [skip_delete](variables.tf#L282) | Allows the underlying resources to be destroyed without destroying the project itself. | bool | | false | +| [tag_bindings](variables.tf#L288) | Tag bindings for this project, in key => tag value id format. | map(string) | | null | ## Outputs @@ -402,9 +495,9 @@ output "compute_robot" { |---|---|:---:| | [custom_roles](outputs.tf#L17) | Ids of the created custom roles. | | | [name](outputs.tf#L25) | Project name. | | -| [number](outputs.tf#L38) | Project number. | | -| [project_id](outputs.tf#L56) | Project id. | | -| [service_accounts](outputs.tf#L76) | Product robot service accounts in project. | | -| [sink_writer_identities](outputs.tf#L92) | Writer identities created for each sink. | | +| [number](outputs.tf#L37) | Project number. | | +| [project_id](outputs.tf#L54) | Project id. | | +| [service_accounts](outputs.tf#L73) | Product robot service accounts in project. | | +| [sink_writer_identities](outputs.tf#L89) | Writer identities created for each sink. | | diff --git a/modules/project/organization-policies.tf b/modules/project/organization-policies.tf index 68707548..d64dd956 100644 --- a/modules/project/organization-policies.tf +++ b/modules/project/organization-policies.tf @@ -16,75 +16,131 @@ # tfdoc:file:description Project-level organization policies. -resource "google_project_organization_policy" "boolean" { - for_each = var.policy_boolean - project = local.project.project_id - constraint = each.key +locals { + _factory_data_raw = ( + var.org_policies_data_path == null + ? tomap({}) + : merge([ + for f in fileset(var.org_policies_data_path, "*.yaml") : + yamldecode(file("${var.org_policies_data_path}/${f}")) + ]...) + ) - dynamic "boolean_policy" { - for_each = each.value == null ? [] : [each.value] - iterator = policy - content { - enforced = policy.value + # simulate applying defaults to data coming from yaml files + _factory_data = { + for k, v in local._factory_data_raw : + k => { + inherit_from_parent = try(v.inherit_from_parent, null) + reset = try(v.reset, null) + allow = can(v.allow) ? { + all = try(v.allow.all, null) + values = try(v.allow.values, null) + } : null + deny = can(v.deny) ? { + all = try(v.deny.all, null) + values = try(v.deny.values, null) + } : null + enforce = try(v.enforce, true) + + rules = [ + for r in try(v.rules, []) : { + allow = can(r.allow) ? { + all = try(r.allow.all, null) + values = try(r.allow.values, null) + } : null + deny = can(r.deny) ? { + all = try(r.deny.all, null) + values = try(r.deny.values, null) + } : null + enforce = try(r.enforce, true) + condition = { + description = try(r.condition.description, null) + expression = try(r.condition.expression, null) + location = try(r.condition.location, null) + title = try(r.condition.title, null) + } + } + ] } } - dynamic "restore_policy" { - for_each = each.value == null ? 
[""] : [] - content { - default = true - } + _org_policies = merge(local._factory_data, var.org_policies) + + org_policies = { + for k, v in local._org_policies : + k => merge(v, { + name = "projects/${local.project.project_id}/policies/${k}" + parent = "projects/${local.project.project_id}" + + is_boolean_policy = v.allow == null && v.deny == null + has_values = ( + length(coalesce(try(v.allow.values, []), [])) > 0 || + length(coalesce(try(v.deny.values, []), [])) > 0 + ) + rules = [ + for r in v.rules : + merge(r, { + has_values = ( + length(coalesce(try(r.allow.values, []), [])) > 0 || + length(coalesce(try(r.deny.values, []), [])) > 0 + ) + }) + ] + }) } } -resource "google_project_organization_policy" "list" { - for_each = var.policy_list - project = local.project.project_id - constraint = each.key +resource "google_org_policy_policy" "default" { + for_each = local.org_policies + name = each.value.name + parent = each.value.parent - dynamic "list_policy" { - for_each = each.value.status == null ? [] : [each.value] - iterator = policy - content { - inherit_from_parent = policy.value.inherit_from_parent - suggested_value = policy.value.suggested_value - dynamic "allow" { - for_each = policy.value.status ? [""] : [] + spec { + inherit_from_parent = each.value.inherit_from_parent + reset = each.value.reset + + rules { + allow_all = try(each.value.allow.all, null) == true ? "TRUE" : null + deny_all = try(each.value.deny.all, null) == true ? "TRUE" : null + enforce = ( + each.value.is_boolean_policy && each.value.enforce != null + ? upper(tostring(each.value.enforce)) + : null + ) + dynamic "values" { + for_each = each.value.has_values ? [1] : [] content { - values = ( - try(length(policy.value.values) > 0, false) - ? policy.value.values - : null - ) - all = ( - try(length(policy.value.values) > 0, false) - ? null - : true - ) + allowed_values = try(each.value.allow.values, null) + denied_values = try(each.value.deny.values, null) } } - dynamic "deny" { - for_each = policy.value.status ? [] : [""] - content { - values = ( - try(length(policy.value.values) > 0, false) - ? policy.value.values - : null - ) - all = ( - try(length(policy.value.values) > 0, false) - ? null - : true - ) + } + + dynamic "rules" { + for_each = each.value.rules + iterator = rule + content { + allow_all = try(rule.value.allow.all, false) == true ? "TRUE" : null + deny_all = try(rule.value.deny.all, false) == true ? "TRUE" : null + enforce = ( + each.value.is_boolean_policy && rule.value.enforce != null + ? upper(tostring(rule.value.enforce)) + : null + ) + condition { + description = rule.value.condition.description + expression = rule.value.condition.expression + location = rule.value.condition.location + title = rule.value.condition.title + } + dynamic "values" { + for_each = rule.value.has_values ? [1] : [] + content { + allowed_values = try(rule.value.allow.values, null) + denied_values = try(rule.value.deny.values, null) + } } } } } - - dynamic "restore_policy" { - for_each = each.value.status == null ? [true] : [] - content { - default = true - } - } } diff --git a/modules/project/outputs.tf b/modules/project/outputs.tf index 3b7efc90..cb940d01 100644 --- a/modules/project/outputs.tf +++ b/modules/project/outputs.tf @@ -26,8 +26,7 @@ output "name" { description = "Project name." 
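  # the depends_on below sequences anything consuming this output after org
  # policies, service activation and the other resources listed there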
value = local.project.name depends_on = [ - google_project_organization_policy.boolean, - google_project_organization_policy.list, + google_org_policy_policy.default, google_project_service.project_services, google_compute_shared_vpc_service_project.service_projects, google_project_iam_member.shared_vpc_host_robots, @@ -39,8 +38,7 @@ output "number" { description = "Project number." value = local.project.number depends_on = [ - google_project_organization_policy.boolean, - google_project_organization_policy.list, + google_org_policy_policy.default, google_project_service.project_services, google_compute_shared_vpc_host_project.shared_vpc_host, google_compute_shared_vpc_service_project.shared_vpc_service, @@ -59,8 +57,7 @@ output "project_id" { depends_on = [ google_project.project, data.google_project.project, - google_project_organization_policy.boolean, - google_project_organization_policy.list, + google_org_policy_policy.default, google_project_service.project_services, google_compute_shared_vpc_host_project.shared_vpc_host, google_compute_shared_vpc_service_project.shared_vpc_service, diff --git a/modules/project/variables.tf b/modules/project/variables.tf index 41d3163f..7cd36c82 100644 --- a/modules/project/variables.tf +++ b/modules/project/variables.tf @@ -40,18 +40,18 @@ variable "custom_roles" { nullable = false } -variable "descriptive_name" { - description = "Name of the project name. Used for project name instead of `name` variable." - type = string - default = null -} - variable "default_service_account" { description = "Project default service account setting: can be one of `delete`, `deprivilege`, `disable`, or `keep`." default = "keep" type = string } +variable "descriptive_name" { + description = "Name of the project name. Used for project name instead of `name` variable." + type = string + default = null +} + variable "group_iam" { description = "Authoritative IAM binding for organization groups, in {GROUP_EMAIL => [ROLES]} format. Group emails need to be static. Can be used in combination with the `iam` variable." type = map(list(string)) @@ -133,6 +133,52 @@ variable "name" { type = string } +variable "org_policies" { + description = "Organization policies applied to this project keyed by policy name." + type = map(object({ + inherit_from_parent = optional(bool) # for list policies only. + reset = optional(bool) + + # default (unconditional) values + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + + # conditional values + rules = optional(list(object({ + allow = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + deny = optional(object({ + all = optional(bool) + values = optional(list(string)) + })) + enforce = optional(bool, true) # for boolean policies only. + condition = object({ + description = optional(string) + expression = optional(string) + location = optional(string) + title = optional(string) + }) + })), []) + })) + default = {} + nullable = false +} + +variable "org_policies_data_path" { + description = "Path containing org policies in YAML format." + type = string + default = null +} + variable "oslogin" { description = "Enable OS Login." type = bool @@ -164,25 +210,6 @@ variable "parent" { } } -variable "policy_boolean" { - description = "Map of boolean org policies and enforcement value, set value to null for policy restore." 
- type = map(bool) - default = {} - nullable = false -} - -variable "policy_list" { - description = "Map of list org policies, status is true for allow, false for deny, null for restore. Values can only be used for allow or deny." - type = map(object({ - inherit_from_parent = bool - suggested_value = string - status = bool - values = list(string) - })) - default = {} - nullable = false -} - variable "prefix" { description = "Prefix used to generate project id and name." type = string diff --git a/modules/project/versions.tf b/modules/project/versions.tf index b1c8c910..286536a6 100644 --- a/modules/project/versions.tf +++ b/modules/project/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/projects-data-source/versions.tf b/modules/projects-data-source/versions.tf index b1c8c910..286536a6 100644 --- a/modules/projects-data-source/versions.tf +++ b/modules/projects-data-source/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/pubsub/versions.tf b/modules/pubsub/versions.tf index b1c8c910..286536a6 100644 --- a/modules/pubsub/versions.tf +++ b/modules/pubsub/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/secret-manager/versions.tf b/modules/secret-manager/versions.tf index b1c8c910..286536a6 100644 --- a/modules/secret-manager/versions.tf +++ b/modules/secret-manager/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/service-directory/versions.tf b/modules/service-directory/versions.tf index b1c8c910..286536a6 100644 --- a/modules/service-directory/versions.tf +++ b/modules/service-directory/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/source-repository/versions.tf b/modules/source-repository/versions.tf index b1c8c910..286536a6 100644 --- a/modules/source-repository/versions.tf +++ b/modules/source-repository/versions.tf @@ -17,11 +17,11 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/modules/vpc-sc/versions.tf b/modules/vpc-sc/versions.tf index b1c8c910..286536a6 100644 --- a/modules/vpc-sc/versions.tf +++ b/modules/vpc-sc/versions.tf @@ -17,11 +17,11 @@ terraform 
{ required_providers { google = { source = "hashicorp/google" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } google-beta = { source = "hashicorp/google-beta" - version = ">= 4.36.0" # tftest + version = ">= 4.40.0" # tftest } } } diff --git a/tests/blueprints/cloud_operations/glb_and_armor/__init__.py b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/__init__.py similarity index 100% rename from tests/blueprints/cloud_operations/glb_and_armor/__init__.py rename to tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/__init__.py diff --git a/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/main.tf b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/main.tf new file mode 100644 index 00000000..3552740c --- /dev/null +++ b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/main.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "test" { + source = "../../../../../../blueprints/cloud-operations/terraform-enterprise-wif/gcp-workload-identity-provider" + billing_account = var.billing_account + project_create = var.project_create + project_id = var.project_id + parent = var.parent + tfe_organization_id = var.tfe_organization_id + tfe_workspace_id = var.tfe_workspace_id + workload_identity_pool_id = var.workload_identity_pool_id + workload_identity_pool_provider_id = var.workload_identity_pool_provider_id + issuer_uri = var.issuer_uri +} diff --git a/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/variables.tf b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/variables.tf new file mode 100644 index 00000000..d99981c0 --- /dev/null +++ b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/fixture/variables.tf @@ -0,0 +1,68 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +variable "billing_account" { + type = string + default = "1234-ABCD-1234" +} + +variable "project_create" { + type = bool + default = true +} + +variable "project_id" { + type = string + default = "project-1" +} + +variable "parent" { + description = "Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format." 
+ type = string + default = null + validation { + condition = var.parent == null || can(regex("(organizations|folders)/[0-9]+", var.parent)) + error_message = "Parent must be of the form folders/folder_id or organizations/organization_id." + } +} + +variable "tfe_organization_id" { + description = "TFE organization id." + type = string + default = "org-123" +} + +variable "tfe_workspace_id" { + description = "TFE workspace id." + type = string + default = "ws-123" +} + +variable "workload_identity_pool_id" { + description = "Workload identity pool id." + type = string + default = "tfe-pool" +} + +variable "workload_identity_pool_provider_id" { + description = "Workload identity pool provider id." + type = string + default = "tfe-provider" +} + +variable "issuer_uri" { + description = "Terraform Enterprise uri. Replace the uri if a self hosted instance is used." + type = string + default = "https://app.terraform.io/" +} diff --git a/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/test_plan.py b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/test_plan.py new file mode 100644 index 00000000..228e51df --- /dev/null +++ b/tests/blueprints/cloud_operations/terraform-enterprise-wif/gcp-workload-identity-provider/test_plan.py @@ -0,0 +1,19 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +def test_resources(e2e_plan_runner): + "Test that plan works and the numbers of resources is as expected." + modules, resources = e2e_plan_runner() + assert len(modules) == 2 + assert len(resources) == 10 diff --git a/tests/blueprints/data_solutions/data_platform_foundations/test_plan.py b/tests/blueprints/data_solutions/data_platform_foundations/test_plan.py index 0e4b77f5..1b51472c 100644 --- a/tests/blueprints/data_solutions/data_platform_foundations/test_plan.py +++ b/tests/blueprints/data_solutions/data_platform_foundations/test_plan.py @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. - import os import pytest - FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture') @@ -24,4 +22,4 @@ def test_resources(e2e_plan_runner): "Test that plan works and the numbers of resources is as expected." modules, resources = e2e_plan_runner(FIXTURES_DIR) assert len(modules) == 41 - assert len(resources) == 314 + assert len(resources) == 315 diff --git a/tests/blueprints/data_solutions/data_playground/test_plan.py b/tests/blueprints/data_solutions/data_playground/test_plan.py index 05bda08c..2653c7ea 100644 --- a/tests/blueprints/data_solutions/data_playground/test_plan.py +++ b/tests/blueprints/data_solutions/data_playground/test_plan.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import os import pytest - FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture') + def test_resources(e2e_plan_runner): "Test that plan works and the numbers of resources is as expected." modules, resources = e2e_plan_runner(FIXTURES_DIR) assert len(modules) == 7 - assert len(resources) == 34 + assert len(resources) == 35 diff --git a/tests/modules/organization_policy/__init__.py b/tests/blueprints/networking/glb_and_armor/__init__.py similarity index 100% rename from tests/modules/organization_policy/__init__.py rename to tests/blueprints/networking/glb_and_armor/__init__.py diff --git a/tests/blueprints/cloud_operations/glb_and_armor/fixture/main.tf b/tests/blueprints/networking/glb_and_armor/fixture/main.tf similarity index 89% rename from tests/blueprints/cloud_operations/glb_and_armor/fixture/main.tf rename to tests/blueprints/networking/glb_and_armor/fixture/main.tf index e02d1093..155677b2 100644 --- a/tests/blueprints/cloud_operations/glb_and_armor/fixture/main.tf +++ b/tests/blueprints/networking/glb_and_armor/fixture/main.tf @@ -13,7 +13,7 @@ # limitations under the License. module "test" { - source = "../../../../../blueprints/cloud-operations/glb_and_armor" + source = "../../../../../blueprints/networking/glb-and-armor" project_create = var.project_create project_id = var.project_id enforce_security_policy = var.enforce_security_policy diff --git a/tests/blueprints/cloud_operations/glb_and_armor/fixture/variables.tf b/tests/blueprints/networking/glb_and_armor/fixture/variables.tf similarity index 100% rename from tests/blueprints/cloud_operations/glb_and_armor/fixture/variables.tf rename to tests/blueprints/networking/glb_and_armor/fixture/variables.tf diff --git a/tests/blueprints/cloud_operations/glb_and_armor/test_plan.py b/tests/blueprints/networking/glb_and_armor/test_plan.py similarity index 100% rename from tests/blueprints/cloud_operations/glb_and_armor/test_plan.py rename to tests/blueprints/networking/glb_and_armor/test_plan.py diff --git a/tests/conftest.py b/tests/conftest.py index a5ded070..3ec58107 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -98,8 +98,8 @@ def recursive_e2e_plan_runner(_plan_runner): def walk_plan(node, modules, resources): # TODO(jccb): this would be better with node.get() but # TerraformPlanOutput objects don't have it - new_modules = node['child_modules'] if 'child_modules' in node else [] - resources += node['resources'] if 'resources' in node else [] + new_modules = node.get('child_modules', []) + resources += node.get('resources', []) modules += new_modules for module in new_modules: walk_plan(module, modules, resources) diff --git a/tests/modules/apigee_organization/fixture/main.tf b/tests/modules/apigee_organization/fixture/main.tf index 9dfb49bc..37fa536b 100644 --- a/tests/modules/apigee_organization/fixture/main.tf +++ b/tests/modules/apigee_organization/fixture/main.tf @@ -21,10 +21,17 @@ module "test" { runtime_type = "CLOUD" billing_type = "EVALUATION" authorized_network = var.network - apigee_environments = [ - "eval1", - "eval2" - ] + apigee_environments = { + eval1 = { + api_proxy_type = "PROGRAMMABLE" + deployment_type = "PROXY" + } + eval2 = { + api_proxy_type = "CONFIGURABLE" + deployment_type = "ARCHIVE" + } + eval3 = {} + } apigee_envgroups = { eval = { environments = [ diff --git a/tests/modules/apigee_organization/test_plan.py b/tests/modules/apigee_organization/test_plan.py index ec2312c9..6e873bc0 100644 --- a/tests/modules/apigee_organization/test_plan.py +++ 
b/tests/modules/apigee_organization/test_plan.py @@ -23,7 +23,7 @@ def resources(plan_runner): def test_resource_count(resources): "Test number of resources created." - assert len(resources) == 6 + assert len(resources) == 7 def test_envgroup_attachment(resources): @@ -42,3 +42,19 @@ def test_envgroup(resources): assert envgroups[0]['name'] == 'eval' assert len(envgroups[0]['hostnames']) == 1 assert envgroups[0]['hostnames'][0] == 'eval.api.example.com' + + +def test_env(resources): + "Test environments." + envs = [r['values'] for r in resources if r['type'] + == 'google_apigee_environment'] + assert len(envs) == 3 + assert envs[0]['name'] == 'eval1' + assert envs[0]['api_proxy_type'] == 'PROGRAMMABLE' + assert envs[0]['deployment_type'] == 'PROXY' + assert envs[1]['name'] == 'eval2' + assert envs[1]['api_proxy_type'] == 'CONFIGURABLE' + assert envs[1]['deployment_type'] == 'ARCHIVE' + assert envs[2]['name'] == 'eval3' + assert envs[2]['api_proxy_type'] == 'API_PROXY_TYPE_UNSPECIFIED' + assert envs[2]['deployment_type'] == 'DEPLOYMENT_TYPE_UNSPECIFIED' diff --git a/tests/modules/compute_mig/fixture/main.tf b/tests/modules/compute_mig/fixture/main.tf index 5d87f40f..b91c0140 100644 --- a/tests/modules/compute_mig/fixture/main.tf +++ b/tests/modules/compute_mig/fixture/main.tf @@ -24,21 +24,18 @@ resource "google_compute_disk" "default" { } module "test" { - source = "../../../../modules/compute-mig" - project_id = "my-project" - location = "europe-west1" - name = "test-mig" - target_size = 2 - default_version = { - instance_template = "foo-template" - name = "foo" - } - autoscaler_config = var.autoscaler_config - health_check_config = var.health_check_config - named_ports = var.named_ports - regional = var.regional - stateful_config = var.stateful_config - - update_policy = var.update_policy - versions = var.versions + source = "../../../../modules/compute-mig" + project_id = "my-project" + name = "test-mig" + target_size = 2 + default_version_name = "foo" + instance_template = "foo-template" + location = var.location + autoscaler_config = var.autoscaler_config + health_check_config = var.health_check_config + named_ports = var.named_ports + stateful_config = var.stateful_config + stateful_disks = var.stateful_disks + update_policy = var.update_policy + versions = var.versions } diff --git a/tests/modules/compute_mig/fixture/variables.tf b/tests/modules/compute_mig/fixture/variables.tf index b9fde834..70117838 100644 --- a/tests/modules/compute_mig/fixture/variables.tf +++ b/tests/modules/compute_mig/fixture/variables.tf @@ -14,101 +14,82 @@ * limitations under the License. */ -variable "autoscaler_config" { - type = object({ - max_replicas = number - min_replicas = number - cooldown_period = number - cpu_utilization_target = number - load_balancing_utilization_target = number - metric = object({ - name = string - single_instance_assignment = number - target = number - type = string # GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE - filter = string - }) - }) +variable "all_instances_config" { + type = any default = null } variable "auto_healing_policies" { - type = object({ - health_check = string - initial_delay_sec = number - }) + type = any + default = null +} + +variable "autoscaler_config" { + type = any + default = null +} + +variable "default_version_name" { + type = any + default = "default" +} + +variable "description" { + type = any + default = "Terraform managed." 
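  # like the other fixture inputs, this is deliberately typed `any` so tests
  # can inject raw HCL values for the refactored optional-based interface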
+} + +variable "distribution_policy" { + type = any default = null } variable "health_check_config" { - type = object({ - type = string # http https tcp ssl http2 - check = map(any) # actual health check block attributes - config = map(number) # interval, thresholds, timeout - logging = bool - }) + type = any default = null } +variable "location" { + type = any + default = "europe-west1-b" +} + variable "named_ports" { - type = map(number) + type = any default = null } -variable "regional" { - type = bool - default = false +variable "stateful_disks" { + type = any + default = {} } variable "stateful_config" { - description = "Stateful configuration can be done by individual instances or for all instances in the MIG. They key in per_instance_config is the name of the specific instance. The key of the stateful_disks is the 'device_name' field of the resource. Please note that device_name is defined at the OS mount level, unlike the disk name." - type = object({ - per_instance_config = map(object({ - #name is the key - #name = string - stateful_disks = map(object({ - #device_name is the key - source = string - mode = string # READ_WRITE | READ_ONLY - delete_rule = string # NEVER | ON_PERMANENT_INSTANCE_DELETION - })) - metadata = map(string) - update_config = object({ - minimal_action = string # NONE | REPLACE | RESTART | REFRESH - most_disruptive_allowed_action = string # REPLACE | RESTART | REFRESH | NONE - remove_instance_state_on_destroy = bool - }) - })) + type = any + default = {} +} - mig_config = object({ - stateful_disks = map(object({ - #device_name is the key - delete_rule = string # NEVER | ON_PERMANENT_INSTANCE_DELETION - })) - }) +variable "target_pools" { + type = any + default = [] +} - }) +variable "target_size" { + type = any default = null } variable "update_policy" { - type = object({ - type = string # OPPORTUNISTIC | PROACTIVE - minimal_action = string # REPLACE | RESTART - min_ready_sec = number - max_surge_type = string # fixed | percent - max_surge = number - max_unavailable_type = string - max_unavailable = number - }) + type = any default = null } variable "versions" { - type = map(object({ - instance_template = string - target_type = string # fixed | percent - target_size = number - })) + type = any + default = {} +} + +variable "wait_for_instances" { + type = any default = null } diff --git a/tests/modules/compute_mig/test_plan.py b/tests/modules/compute_mig/test_plan.py index 253e27bc..e24a7ca7 100644 --- a/tests/modules/compute_mig/test_plan.py +++ b/tests/modules/compute_mig/test_plan.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_defaults(plan_runner): "Test variable defaults." _, resources = plan_runner() @@ -21,7 +22,7 @@ def test_defaults(plan_runner): assert mig['type'] == 'google_compute_instance_group_manager' assert mig['values']['target_size'] == 2 assert mig['values']['zone'] - _, resources = plan_runner(regional='true') + _, resources = plan_runner(location='"europe-west1"') assert len(resources) == 1 mig = resources[0] assert mig['type'] == 'google_compute_region_instance_group_manager' @@ -31,7 +32,12 @@ def test_defaults(plan_runner): def test_health_check(plan_runner): "Test health check resource." 
-  health_check_config = '{type="tcp", check={port=80}, config=null, logging=false}'
+  health_check_config = '''{
+    enable_logging = true
+    tcp = {
+      port = 80
+    }
+  }'''
   _, resources = plan_runner(health_check_config=health_check_config)
   assert len(resources) == 2
   assert any(r['type'] == 'google_compute_health_check' for r in resources)
@@ -39,20 +45,26 @@
 
 def test_autoscaler(plan_runner):
   "Test autoscaler resource."
-  autoscaler_config = (
-      '{'
-      'max_replicas=3, min_replicas=1, cooldown_period=60,'
-      'cpu_utilization_target=65, load_balancing_utilization_target=null,'
-      'metric=null'
-      '}'
-  )
+  autoscaler_config = '''{
+    cooldown_period = 60
+    max_replicas    = 3
+    min_replicas    = 1
+    scaling_signals = {
+      cpu_utilization = {
+        target = 65
+      }
+    }
+  }'''
   _, resources = plan_runner(autoscaler_config=autoscaler_config)
   assert len(resources) == 2
   autoscaler = resources[0]
   assert autoscaler['type'] == 'google_compute_autoscaler'
   assert autoscaler['values']['autoscaling_policy'] == [{
       'cooldown_period': 60,
-      'cpu_utilization': [{'predictive_method': 'NONE', 'target': 65}],
+      'cpu_utilization': [{
+          'predictive_method': 'NONE',
+          'target': 65
+      }],
       'load_balancing_utilization': [],
       'max_replicas': 3,
       'metric': [],
@@ -62,7 +74,7 @@ def test_autoscaler(plan_runner):
       'scaling_schedules': [],
   }]
   _, resources = plan_runner(autoscaler_config=autoscaler_config,
-                             regional='true')
+                             location='"europe-west1"')
   assert len(resources) == 2
   autoscaler = resources[0]
   assert autoscaler['type'] == 'google_compute_region_autoscaler'
@@ -71,17 +83,10 @@ def test_autoscaler(plan_runner):
 
 def test_stateful_mig(plan_runner):
   "Test stateful instances - mig."
-  stateful_config = (
-      '{'
-      'per_instance_config = {},'
-      'mig_config = {'
-      'stateful_disks = {'
-      'persistent-disk-1 = {delete_rule="NEVER"}'
-      '}'
-      '}'
-      '}'
-  )
-  _, resources = plan_runner(stateful_config=stateful_config)
+  stateful_disks = '''{
+    persistent-disk-1 = null
+  }'''
+  _, resources = plan_runner(stateful_disks=stateful_disks)
   assert len(resources) == 1
   statefuldisk = resources[0]
   assert statefuldisk['type'] == 'google_compute_instance_group_manager'
@@ -93,35 +98,19 @@ def test_stateful_mig(plan_runner):
 
 def test_stateful_instance(plan_runner):
   "Test stateful instances - instance."
-  stateful_config = (
-      '{'
-      'per_instance_config = {'
-      'instance-1 = {'
-      'stateful_disks = {'
-      'persistent-disk-1 = {'
-      'source = "test-disk",'
-      'mode = "READ_ONLY",'
-      'delete_rule= "NEVER",'
-      '},'
-      '},'
-      'metadata = {'
-      'foo = "bar"'
-      '},'
-      'update_config = {'
-      'minimal_action = "NONE",'
-      'most_disruptive_allowed_action = "REPLACE",'
-      'remove_instance_state_on_destroy = false,'
-
-      '},'
-      '},'
-      '},'
-      'mig_config = {'
-      'stateful_disks = {'
-      'persistent-disk-1 = {delete_rule="NEVER"}'
-      '}'
-      '}'
-      '}'
-  )
+  stateful_config = '''{
+    instance-1 = {
+      most_disruptive_action = "REPLACE",
+      preserved_state = {
+        disks = {
+          persistent-disk-1 = {
+            source = "test-disk"
+          }
+        }
+        metadata = { foo = "bar" }
+      }
+    }
+  }'''
   _, resources = plan_runner(stateful_config=stateful_config)
   assert len(resources) == 2
   instanceconfig = resources[0]
@@ -134,13 +123,12 @@ def test_stateful_instance(plan_runner):
           'device_name': 'persistent-disk-1',
           'delete_rule': 'NEVER',
           'source': 'test-disk',
-          'mode': 'READ_ONLY',
+          'mode': 'READ_WRITE',
       }],
       'metadata': {
          'foo': 'bar'
      }
  }]
-  assert instanceconfig['values']['minimal_action'] == 'NONE'
   assert instanceconfig['values']['most_disruptive_allowed_action'] == 'REPLACE'
   assert instanceconfig['values']['remove_instance_state_on_destroy'] == False
diff --git a/tests/modules/dns/fixture/variables.tf b/tests/modules/dns/fixture/variables.tf
index 522b238a..8e55a287 100644
--- a/tests/modules/dns/fixture/variables.tf
+++ b/tests/modules/dns/fixture/variables.tf
@@ -32,15 +32,27 @@ variable "peer_network" {
 }
 
 variable "recordsets" {
-  type = map(object({
-    ttl     = number
-    records = list(string)
-  }))
+  type = any
   default = {
     "A localhost"                = { ttl = 300, records = ["127.0.0.1"] }
     "A local-host.test.example." = { ttl = 300, records = ["127.0.0.2"] }
     "CNAME *"                    = { ttl = 300, records = ["localhost.example.org."] }
     "A "                         = { ttl = 300, records = ["127.0.0.3"] }
+    "A geo" = {
+      geo_routing = [
+        { location = "europe-west1", records = ["127.0.0.4"] },
+        { location = "europe-west2", records = ["127.0.0.5"] },
+        { location = "europe-west3", records = ["127.0.0.6"] }
+      ]
+    }
+    "A wrr" = {
+      ttl = 600
+      wrr_routing = [
+        { weight = 0.6, records = ["127.0.0.7"] },
+        { weight = 0.2, records = ["127.0.0.8"] },
+        { weight = 0.2, records = ["127.0.0.9"] }
+      ]
+    }
   }
 }
diff --git a/tests/modules/dns/test_plan.py b/tests/modules/dns/test_plan.py
index 184ffe5d..5cc1ba70 100644
--- a/tests/modules/dns/test_plan.py
+++ b/tests/modules/dns/test_plan.py
@@ -12,13 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
 def test_private(plan_runner):
   "Test private zone with three recordsets."
   _, resources = plan_runner()
-  assert len(resources) == 5
-  assert set(r['type'] for r in resources) == set([
+  assert len(resources) == 7
+  assert set(r['type'] for r in resources) == {
       'google_dns_record_set', 'google_dns_managed_zone'
-  ])
+  }
   for r in resources:
     if r['type'] != 'google_dns_managed_zone':
       continue
@@ -29,14 +30,54 @@ def test_private(plan_runner):
 
 def test_private_recordsets(plan_runner):
   "Test recordsets in private zone."
   _, resources = plan_runner()
-  recordsets = [r['values']
-                for r in resources if r['type'] == 'google_dns_record_set']
-  assert set(r['name'] for r in recordsets) == set([
-      'localhost.test.example.',
-      'local-host.test.example.',
-      '*.test.example.',
-      "test.example."
-  ])
+  recordsets = [
+      r['values'] for r in resources if r['type'] == 'google_dns_record_set'
+  ]
+
+  assert set(r['name'] for r in recordsets) == {
+      'localhost.test.example.', 'local-host.test.example.', '*.test.example.',
+      "test.example.", "geo.test.example.", "wrr.test.example."
+  }
+
+  for r in recordsets:
+    if r['name'] not in ['wrr.test.example.', 'geo.test.example.']:
+      assert r['routing_policy'] == []
+      assert r['rrdatas'] != []
+
+
+def test_routing_policies(plan_runner):
+  "Test recordsets with routing policies."
+  _, resources = plan_runner()
+  recordsets = [
+      r['values'] for r in resources if r['type'] == 'google_dns_record_set'
+  ]
+  geo_zone = [
+      r['values'] for r in resources if r['address'] ==
+      'module.test.google_dns_record_set.cloud-geo-records["A geo"]'
+  ][0]
+  assert geo_zone['name'] == 'geo.test.example.'
+  assert geo_zone['routing_policy'][0]['wrr'] == []
+  geo_policy = geo_zone['routing_policy'][0]['geo']
+  assert geo_policy[0]['location'] == 'europe-west1'
+  assert geo_policy[0]['rrdatas'] == ['127.0.0.4']
+  assert geo_policy[1]['location'] == 'europe-west2'
+  assert geo_policy[1]['rrdatas'] == ['127.0.0.5']
+  assert geo_policy[2]['location'] == 'europe-west3'
+  assert geo_policy[2]['rrdatas'] == ['127.0.0.6']
+
+  wrr_zone = [
+      r['values'] for r in resources if r['address'] ==
+      'module.test.google_dns_record_set.cloud-wrr-records["A wrr"]'
+  ][0]
+  assert wrr_zone['name'] == 'wrr.test.example.'
+  wrr_policy = wrr_zone['routing_policy'][0]['wrr']
+  assert wrr_policy[0]['weight'] == 0.6
+  assert wrr_policy[0]['rrdatas'] == ['127.0.0.7']
+  assert wrr_policy[1]['weight'] == 0.2
+  assert wrr_policy[1]['rrdatas'] == ['127.0.0.8']
+  assert wrr_policy[2]['weight'] == 0.2
+  assert wrr_policy[2]['rrdatas'] == ['127.0.0.9']
+  assert wrr_zone['routing_policy'][0]['geo'] == []
 
 
 def test_private_no_networks(plan_runner):
@@ -60,26 +101,31 @@ def test_forwarding_recordsets_null_forwarders(plan_runner):
 
 def test_forwarding(plan_runner):
   "Test forwarding zone with single forwarder."
-  _, resources = plan_runner(
-      type='forwarding', recordsets='null',
-      forwarders='{ "1.2.3.4" = null }')
+  _, resources = plan_runner(type='forwarding', recordsets='null',
+                             forwarders='{ "1.2.3.4" = null }')
   assert len(resources) == 1
   resource = resources[0]
   assert resource['type'] == 'google_dns_managed_zone'
-  assert resource['values']['forwarding_config'] == [{'target_name_servers': [
-      {'forwarding_path': '', 'ipv4_address': '1.2.3.4'}]}]
+  assert resource['values']['forwarding_config'] == [{
+      'target_name_servers': [{
+          'forwarding_path': '',
+          'ipv4_address': '1.2.3.4'
+      }]
+  }]
 
 
 def test_peering(plan_runner):
   "Test peering zone."
-  _, resources = plan_runner(type='peering',
-                             recordsets='null',
+  _, resources = plan_runner(type='peering', recordsets='null',
                              peer_network='dummy-vpc-self-link')
   assert len(resources) == 1
   resource = resources[0]
   assert resource['type'] == 'google_dns_managed_zone'
-  assert resource['values']['peering_config'] == [
-      {'target_network': [{'network_url': 'dummy-vpc-self-link'}]}]
+  assert resource['values']['peering_config'] == [{
+      'target_network': [{
+          'network_url': 'dummy-vpc-self-link'
+      }]
+  }]
 
 
 def test_public(plan_runner):
diff --git a/tests/modules/folder/fixture/main.tf b/tests/modules/folder/fixture/main.tf
index 2fa1b4fd..a347f61b 100644
--- a/tests/modules/folder/fixture/main.tf
+++ b/tests/modules/folder/fixture/main.tf
@@ -22,10 +22,10 @@ module "test" {
   iam                         = var.iam
   iam_additive                = var.iam_additive
   iam_additive_members        = var.iam_additive_members
-  policy_boolean              = var.policy_boolean
-  policy_list                 = var.policy_list
   firewall_policies           = var.firewall_policies
   firewall_policy_association = var.firewall_policy_association
   logging_sinks               = var.logging_sinks
   logging_exclusions          = var.logging_exclusions
+  org_policies                = var.org_policies
+  org_policies_data_path      = var.org_policies_data_path
 }
diff --git a/tests/modules/folder/fixture/variables.tf b/tests/modules/folder/fixture/variables.tf
index da676deb..e2d7a293 100644
--- a/tests/modules/folder/fixture/variables.tf
+++ b/tests/modules/folder/fixture/variables.tf
@@ -34,16 +34,6 @@ variable "iam_additive_members" {
   default = {}
 }
 
-variable "policy_boolean" {
-  type    = any
-  default = {}
-}
-
-variable "policy_list" {
-  type    = any
-  default = {}
-}
-
 variable "firewall_policies" {
   type    = any
   default = {}
@@ -63,3 +53,13 @@ variable "logging_exclusions" {
   type    = any
   default = {}
 }
+
+variable "org_policies" {
+  type    = any
+  default = {}
+}
+
+variable "org_policies_data_path" {
+  type    = any
+  default = null
+}
diff --git a/tests/modules/folder/test_plan_org_policies.py b/tests/modules/folder/test_plan_org_policies.py
index b7ae96c2..0a6b9729 100644
--- a/tests/modules/folder/test_plan_org_policies.py
+++ b/tests/modules/folder/test_plan_org_policies.py
@@ -12,56 +12,244 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-def test_sink(plan_runner):
-  "Test folder-level sink."
- policy_boolean = '{policy-a = true, policy-b = false, policy-c = null}' - _, resources = plan_runner(policy_boolean=policy_boolean) +import hcl2 +import yaml +BOOLEAN_POLICIES = '''{ + "iam.disableServiceAccountKeyCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyUpload" = { + enforce = false + rules = [ + { + condition = { + expression = "resource.matchTagId(aa, bb)" + title = "condition" + description = "test condition" + location = "xxx" + } + enforce = true + } + ] + } +}''' + +LIST_POLICIES = '''{ + "compute.vmExternalIpAccess" = { + deny = { all = true } + } + "iam.allowedPolicyMemberDomains" = { + allow = { + values = ["C0xxxxxxx", "C0yyyyyyy"] + } + } + "compute.restrictLoadBalancerCreationForTypes" = { + deny = { values = ["in:EXTERNAL"] } + rules = [ + { + condition = { + expression = "resource.matchTagId(aa, bb)" + title = "condition" + description = "test condition" + location = "xxx" + } + allow = { + values = ["EXTERNAL_1"] + } + }, + { + condition = { + expression = "resource.matchTagId(cc, dd)" + title = "condition2" + description = "test condition2" + location = "xxx" + } + allow = { + all = true + } + } + ] + } +}''' + + +def test_policy_boolean(plan_runner): + "Test boolean org policy." + _, resources = plan_runner(org_policies=BOOLEAN_POLICIES) + validate_policy_boolean_resources(resources) + + +def test_policy_list(plan_runner): + "Test list org policy." + _, resources = plan_runner(org_policies=LIST_POLICIES) + validate_policy_list_resources(resources) + + +def test_policy_boolean_factory(plan_runner, tmp_path): + # convert hcl policies to yaml + hcl_policies = f'p = {BOOLEAN_POLICIES}' + yaml_policies = yaml.dump(hcl2.loads(hcl_policies)['p']) + + yaml_file = tmp_path / 'policies.yaml' + yaml_file.write_text(yaml_policies) + + _, resources = plan_runner(org_policies_data_path=f'"{tmp_path}"') + validate_policy_boolean_resources(resources) + + +def test_policy_list_factory(plan_runner, tmp_path): + # convert hcl policies to yaml + hcl_policies = f'p = {LIST_POLICIES}' + yaml_policies = yaml.dump(hcl2.loads(hcl_policies)['p']) + + yaml_file = tmp_path / 'policies.yaml' + yaml_file.write_text(yaml_policies) + + _, resources = plan_runner(org_policies_data_path=f'"{tmp_path}"') + validate_policy_list_resources(resources) + + +def validate_policy_boolean_resources(resources): + assert len(resources) == 3 + policies = [r for r in resources if r['type'] == 'google_org_policy_policy'] + assert len(policies) == 2 + + p1 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.disableServiceAccountKeyCreation' + ][0] + + assert p1['inherit_from_parent'] is None + assert p1['reset'] is None + assert p1['rules'] == [{ + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': 'TRUE', + 'values': [] + }] + + p2 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.disableServiceAccountKeyUpload' + ][0] + + assert p2['inherit_from_parent'] is None + assert p2['reset'] is None + assert len(p2['rules']) == 2 + assert p2['rules'][0] == { + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': 'FALSE', + 'values': [] + } + assert p2['rules'][1] == { + 'allow_all': None, + 'condition': [{ + 'description': 'test condition', + 'expression': 'resource.matchTagId(aa, bb)', + 'location': 'xxx', + 'title': 'condition' + }], + 'deny_all': None, + 'enforce': 'TRUE', + 'values': [] + } + + +def validate_policy_list_resources(resources): assert len(resources) == 4 - resources = [r for r in resources if r['type'] 
- == 'google_folder_organization_policy'] - assert sorted([r['index'] for r in resources]) == [ - 'policy-a', - 'policy-b', - 'policy-c', - ] - policy_values = [] - for resource in resources: - for policy in ('boolean_policy', 'restore_policy'): - value = resource['values'][policy] - if value: - policy_values.append((resource['index'], policy,) + value[0].popitem()) - assert sorted(policy_values) == [ - ('policy-a', 'boolean_policy', 'enforced', True), - ('policy-b', 'boolean_policy', 'enforced', False), - ('policy-c', 'restore_policy', 'default', True), - ] + policies = [r for r in resources if r['type'] == 'google_org_policy_policy'] + assert len(policies) == 3 -def test_exclussions(plan_runner): - "Test folder-level logging exclusions." - policy_list = ( - '{' - 'policy-a = {inherit_from_parent = true, suggested_value = null, status = true, values = []}, ' - 'policy-b = {inherit_from_parent = null, suggested_value = "foo", status = false, values = ["bar"]}, ' - 'policy-c = {inherit_from_parent = null, suggested_value = true, status = null, values = null}' - '}' - ) - _, resources = plan_runner(policy_list=policy_list) - assert len(resources) == 4 - resources = [r for r in resources if r['type'] - == 'google_folder_organization_policy'] - assert sorted([r['index'] for r in resources]) == [ - 'policy-a', - 'policy-b', - 'policy-c', - ] - values = [r['values'] for r in resources] - assert [r['constraint'] for r in values] == [ - 'policy-a', 'policy-b', 'policy-c' - ] - assert values[0]['list_policy'][0]['allow'] == [ - {'all': True, 'values': None}] - assert values[1]['list_policy'][0]['deny'] == [ - {'all': False, 'values': ["bar"]}] - assert values[2]['restore_policy'] == [{'default': True}] + p1 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'compute.vmExternalIpAccess' + ][0] + assert p1['inherit_from_parent'] is None + assert p1['reset'] is None + assert p1['rules'] == [{ + 'allow_all': None, + 'condition': [], + 'deny_all': 'TRUE', + 'enforce': None, + 'values': [] + }] + + p2 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.allowedPolicyMemberDomains' + ][0] + assert p2['inherit_from_parent'] is None + assert p2['reset'] is None + assert p2['rules'] == [{ + 'allow_all': + None, + 'condition': [], + 'deny_all': + None, + 'enforce': + None, + 'values': [{ + 'allowed_values': [ + 'C0xxxxxxx', + 'C0yyyyyyy', + ], + 'denied_values': None + }] + }] + + p3 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'compute.restrictLoadBalancerCreationForTypes' + ][0] + assert p3['inherit_from_parent'] is None + assert p3['reset'] is None + assert len(p3['rules']) == 3 + assert p3['rules'][0] == { + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': None, + 'values': [{ + 'allowed_values': None, + 'denied_values': ['in:EXTERNAL'] + }] + } + + assert p3['rules'][1] == { + 'allow_all': None, + 'condition': [{ + 'description': 'test condition', + 'expression': 'resource.matchTagId(aa, bb)', + 'location': 'xxx', + 'title': 'condition' + }], + 'deny_all': None, + 'enforce': None, + 'values': [{ + 'allowed_values': ['EXTERNAL_1'], + 'denied_values': None + }] + } + + assert p3['rules'][2] == { + 'allow_all': 'TRUE', + 'condition': [{ + 'description': 'test condition2', + 'expression': 'resource.matchTagId(cc, dd)', + 'location': 'xxx', + 'title': 'condition2' + }], + 'deny_all': None, + 'enforce': None, + 'values': [] + } diff --git a/tests/modules/gke_cluster/fixture/variables.tf 
b/tests/modules/gke_cluster/fixture/variables.tf index 1b539d20..97fc6a63 100644 --- a/tests/modules/gke_cluster/fixture/variables.tf +++ b/tests/modules/gke_cluster/fixture/variables.tf @@ -28,3 +28,10 @@ variable "enable_features" { workload_identity = true } } + +variable "monitoring_config" { + type = any + default = { + managed_prometheus = true + } +} diff --git a/tests/modules/gke_nodepool/fixture/main.tf b/tests/modules/gke_nodepool/fixture/main.tf index aaa030b9..4ee27482 100644 --- a/tests/modules/gke_nodepool/fixture/main.tf +++ b/tests/modules/gke_nodepool/fixture/main.tf @@ -14,22 +14,31 @@ * limitations under the License. */ +resource "google_service_account" "test" { + project = "my-project" + account_id = "gke-nodepool-test" + display_name = "Test Service Account" +} + module "test" { - source = "../../../../modules/gke-nodepool" - project_id = "my-project" - cluster_name = "cluster-1" - location = "europe-west1-b" - name = "nodepool-1" - gke_version = var.gke_version - labels = var.labels - max_pods_per_node = var.max_pods_per_node - node_config = var.node_config - node_count = var.node_count - node_locations = var.node_locations - nodepool_config = var.nodepool_config - pod_range = var.pod_range - reservation_affinity = var.reservation_affinity - service_account = var.service_account + source = "../../../../modules/gke-nodepool" + project_id = "my-project" + cluster_name = "cluster-1" + location = "europe-west1-b" + name = "nodepool-1" + gke_version = var.gke_version + labels = var.labels + max_pods_per_node = var.max_pods_per_node + node_config = var.node_config + node_count = var.node_count + node_locations = var.node_locations + nodepool_config = var.nodepool_config + pod_range = var.pod_range + reservation_affinity = var.reservation_affinity + service_account = { + create = var.service_account_create + email = google_service_account.test.email + } sole_tenant_nodegroup = var.sole_tenant_nodegroup tags = var.tags taints = var.taints diff --git a/tests/modules/gke_nodepool/fixture/variables.tf b/tests/modules/gke_nodepool/fixture/variables.tf index 420b9eb0..18376ec5 100644 --- a/tests/modules/gke_nodepool/fixture/variables.tf +++ b/tests/modules/gke_nodepool/fixture/variables.tf @@ -65,9 +65,9 @@ variable "reservation_affinity" { default = null } -variable "service_account" { - type = any - default = null +variable "service_account_create" { + type = bool + default = false } variable "sole_tenant_nodegroup" { diff --git a/tests/modules/gke_nodepool/test_plan.py b/tests/modules/gke_nodepool/test_plan.py index fd63f332..75d1cc14 100644 --- a/tests/modules/gke_nodepool/test_plan.py +++ b/tests/modules/gke_nodepool/test_plan.py @@ -21,9 +21,9 @@ def test_defaults(plan_runner): def test_service_account(plan_runner): - _, resources = plan_runner(service_account='{email="foo@example.org"}') + _, resources = plan_runner() assert len(resources) == 1 - _, resources = plan_runner(service_account='{}') + _, resources = plan_runner(service_account_create='true') assert len(resources) == 2 assert 'google_service_account' in [r['type'] for r in resources] diff --git a/tests/modules/net_ilb/fixture/main.tf b/tests/modules/net_ilb/fixture/main.tf index c592421f..66346efc 100644 --- a/tests/modules/net_ilb/fixture/main.tf +++ b/tests/modules/net_ilb/fixture/main.tf @@ -15,21 +15,21 @@ */ module "test" { - source = "../../../../modules/net-ilb" - project_id = "my-project" - region = "europe-west1" - network = "default" - subnetwork = "default" - name = "ilb-test" - labels = {} - address 
= var.address - backends = var.backends - backend_config = var.backend_config - failover_config = var.failover_config - global_access = var.global_access - health_check = var.health_check - health_check_config = var.health_check_config - ports = var.ports - protocol = var.protocol - service_label = var.service_label + source = "../../../../modules/net-ilb" + project_id = "my-project" + region = "europe-west1" + name = "ilb-test" + vpc_config = { + network = "default" + subnetwork = "default" + } + address = var.address + backend_service_config = var.backend_service_config + backends = var.backends + description = var.description + global_access = var.global_access + group_configs = var.group_configs + ports = var.ports + protocol = var.protocol + service_label = var.service_label } diff --git a/tests/modules/net_ilb/fixture/variables.tf b/tests/modules/net_ilb/fixture/variables.tf index b00b49a6..2c2c2fb4 100644 --- a/tests/modules/net_ilb/fixture/variables.tf +++ b/tests/modules/net_ilb/fixture/variables.tf @@ -19,30 +19,20 @@ variable "address" { default = null } +variable "backend_service_config" { + description = "Backend service level configuration." + type = any + default = {} +} + variable "backends" { - type = list(object({ - failover = bool - group = string - balancing_mode = string - })) + type = any + default = [] } -variable "backend_config" { - type = object({ - session_affinity = string - timeout_sec = number - connection_draining_timeout_sec = number - }) - default = null -} - -variable "failover_config" { - type = object({ - disable_connection_drain = bool - drop_traffic_if_unhealthy = bool - ratio = number - }) - default = null +variable "description" { + type = string + default = "Terraform managed." } variable "global_access" { @@ -50,26 +40,9 @@ variable "global_access" { default = null } -variable "health_check" { - type = string - default = null -} - -variable "health_check_config" { - type = object({ - type = string # http https tcp ssl http2 - check = map(any) # actual health check block attributes - config = map(number) # interval, thresholds, timeout - logging = bool - }) - default = { - type = "http" - check = { - port_specification = "USE_SERVING_PORT" - } - config = {} - logging = false - } +variable "group_configs" { + type = any + default = {} } variable "ports" { diff --git a/tests/modules/net_ilb/test_plan.py b/tests/modules/net_ilb/test_plan.py index 722ada3d..9956b331 100644 --- a/tests/modules/net_ilb/test_plan.py +++ b/tests/modules/net_ilb/test_plan.py @@ -29,7 +29,7 @@ def test_defaults(plan_runner): assert backend['backend'][0]['group'] == 'foo' health_check = resources['google_compute_health_check'] for k, v in health_check.items(): - if k == 'http_health_check': + if k == 'tcp_health_check': assert len(v) == 1 assert v[0]['port_specification'] == 'USE_SERVING_PORT' elif k.endswith('_health_check'): @@ -38,12 +38,14 @@ def test_defaults(plan_runner): def test_forwarding_rule(plan_runner): "Test forwarding rule variables." 
- _, resources = plan_runner(backends=_BACKENDS, - global_access='true', + _, resources = plan_runner(backends=_BACKENDS, global_access='true', ports="[80]") assert len(resources) == 3 - values = [r['values'] for r in resources if r['type'] - == 'google_compute_forwarding_rule'][0] + values = [ + r['values'] + for r in resources + if r['type'] == 'google_compute_forwarding_rule' + ][0] assert not values['all_ports'] assert values['ports'] == ['80'] assert values['allow_global_access'] diff --git a/tests/modules/organization/fixture/main.tf b/tests/modules/organization/fixture/main.tf index 04ae4adf..4f5df9e2 100644 --- a/tests/modules/organization/fixture/main.tf +++ b/tests/modules/organization/fixture/main.tf @@ -28,8 +28,8 @@ module "test" { iam_audit_config = var.iam_audit_config logging_sinks = var.logging_sinks logging_exclusions = var.logging_exclusions - policy_boolean = var.policy_boolean - policy_list = var.policy_list + org_policies = var.org_policies + org_policies_data_path = var.org_policies_data_path tag_bindings = var.tag_bindings tags = var.tags } diff --git a/tests/modules/organization/fixture/variables.tf b/tests/modules/organization/fixture/variables.tf index 1d7ca88d..2508fb06 100644 --- a/tests/modules/organization/fixture/variables.tf +++ b/tests/modules/organization/fixture/variables.tf @@ -44,16 +44,6 @@ variable "iam_audit_config" { default = {} } -variable "policy_boolean" { - type = any - default = {} -} - -variable "policy_list" { - type = any - default = {} -} - variable "firewall_policies" { type = any default = {} @@ -79,6 +69,16 @@ variable "logging_exclusions" { default = {} } +variable "org_policies" { + type = any + default = {} +} + +variable "org_policies_data_path" { + type = any + default = null +} + variable "tag_bindings" { type = any default = null diff --git a/tests/modules/organization/test_plan.py b/tests/modules/organization/test_plan.py index a40758a2..37860ab6 100644 --- a/tests/modules/organization/test_plan.py +++ b/tests/modules/organization/test_plan.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_audit_config(plan_runner): "Test audit config." 
iam_audit_config = '{allServices={DATA_READ=[], DATA_WRITE=["user:me@example.org"]}}' _, resources = plan_runner(iam_audit_config=iam_audit_config) assert len(resources) == 1 - log_types = set(r['log_type'] - for r in resources[0]['values']['audit_log_config']) + log_types = set( + r['log_type'] for r in resources[0]['values']['audit_log_config']) assert log_types == set(['DATA_READ', 'DATA_WRITE']) @@ -28,21 +29,21 @@ def test_iam(plan_runner): '{' '"owners@example.org" = ["roles/owner", "roles/resourcemanager.folderAdmin"],' '"viewers@example.org" = ["roles/viewer"]' - '}' - ) - iam = ( - '{' - '"roles/owner" = ["user:one@example.org", "user:two@example.org"],' - '"roles/browser" = ["domain:example.org"]' - '}' - ) + '}') + iam = ('{' + '"roles/owner" = ["user:one@example.org", "user:two@example.org"],' + '"roles/browser" = ["domain:example.org"]' + '}') _, resources = plan_runner(group_iam=group_iam, iam=iam) roles = sorted([(r['values']['role'], sorted(r['values']['members'])) - for r in resources if r['type'] == 'google_organization_iam_binding']) + for r in resources + if r['type'] == 'google_organization_iam_binding']) assert roles == [ ('roles/browser', ['domain:example.org']), - ('roles/owner', ['group:owners@example.org', 'user:one@example.org', - 'user:two@example.org']), + ('roles/owner', [ + 'group:owners@example.org', 'user:one@example.org', + 'user:two@example.org' + ]), ('roles/resourcemanager.folderAdmin', ['group:owners@example.org']), ('roles/viewer', ['group:viewers@example.org']), ] @@ -50,55 +51,12 @@ def test_iam(plan_runner): def test_iam_additive_members(plan_runner): "Test IAM additive members." - iam = ( - '{"user:one@example.org" = ["roles/owner"],' - '"user:two@example.org" = ["roles/owner", "roles/editor"]}' - ) + iam = ('{"user:one@example.org" = ["roles/owner"],' + '"user:two@example.org" = ["roles/owner", "roles/editor"]}') _, resources = plan_runner(iam_additive_members=iam) roles = set((r['values']['role'], r['values']['member']) - for r in resources if r['type'] == 'google_organization_iam_member') - assert roles == set([ - ('roles/owner', 'user:one@example.org'), - ('roles/owner', 'user:two@example.org'), - ('roles/editor', 'user:two@example.org') - ]) - - -def test_policy_boolean(plan_runner): - "Test boolean org policy." - policy_boolean = '{policy-a = true, policy-b = false, policy-c = null}' - _, resources = plan_runner(policy_boolean=policy_boolean) - assert len(resources) == 3 - constraints = set(r['values']['constraint'] for r in resources) - assert set(constraints) == set(['policy-a', 'policy-b', 'policy-c']) - policies = [] - for resource in resources: - for policy in ('boolean_policy', 'restore_policy'): - value = resource['values'][policy] - if value: - policies.append((policy,) + value[0].popitem()) - assert set(policies) == set([ - ('boolean_policy', 'enforced', True), - ('boolean_policy', 'enforced', False), - ('restore_policy', 'default', True)]) - - -def test_policy_list(plan_runner): - "Test list org policy." 
- policy_list = ( - '{' - 'policy-a = {inherit_from_parent = true, suggested_value = null, status = true, values = []}, ' - 'policy-b = {inherit_from_parent = null, suggested_value = "foo", status = false, values = ["bar"]}, ' - 'policy-c = {inherit_from_parent = null, suggested_value = true, status = null, values = null}' - '}' - ) - _, resources = plan_runner(policy_list=policy_list) - assert len(resources) == 3 - values = [r['values'] for r in resources] - assert [r['constraint'] - for r in values] == ['policy-a', 'policy-b', 'policy-c'] - assert values[0]['list_policy'][0]['allow'] == [ - {'all': True, 'values': None}] - assert values[1]['list_policy'][0]['deny'] == [ - {'all': False, 'values': ["bar"]}] - assert values[2]['restore_policy'] == [{'default': True}] + for r in resources + if r['type'] == 'google_organization_iam_member') + assert roles == set([('roles/owner', 'user:one@example.org'), + ('roles/owner', 'user:two@example.org'), + ('roles/editor', 'user:two@example.org')]) diff --git a/tests/modules/organization/test_plan_org_policies.py b/tests/modules/organization/test_plan_org_policies.py new file mode 100644 index 00000000..053457ce --- /dev/null +++ b/tests/modules/organization/test_plan_org_policies.py @@ -0,0 +1,338 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import difflib +from pathlib import Path + +import hcl2 +import yaml + +BOOLEAN_POLICIES = '''{ + "iam.disableServiceAccountKeyCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyUpload" = { + enforce = false + rules = [ + { + condition = { + expression = "resource.matchTagId(aa, bb)" + title = "condition" + description = "test condition" + location = "xxx" + } + enforce = true + } + ] + } +}''' + +LIST_POLICIES = '''{ + "compute.vmExternalIpAccess" = { + deny = { all = true } + } + "iam.allowedPolicyMemberDomains" = { + allow = { + values = ["C0xxxxxxx", "C0yyyyyyy"] + } + } + "compute.restrictLoadBalancerCreationForTypes" = { + deny = { values = ["in:EXTERNAL"] } + rules = [ + { + condition = { + expression = "resource.matchTagId(aa, bb)" + title = "condition" + description = "test condition" + location = "xxx" + } + allow = { + values = ["EXTERNAL_1"] + } + }, + { + condition = { + expression = "resource.matchTagId(cc, dd)" + title = "condition2" + description = "test condition2" + location = "xxx" + } + allow = { + all = true + } + } + ] + } +}''' + + +def test_policy_boolean(plan_runner): + "Test boolean org policy." + _, resources = plan_runner(org_policies=BOOLEAN_POLICIES) + validate_policy_boolean_resources(resources) + + +def test_policy_list(plan_runner): + "Test list org policy." 
+ _, resources = plan_runner(org_policies=LIST_POLICIES) + validate_policy_list_resources(resources) + + +def test_policy_boolean_factory(plan_runner, tmp_path): + # convert hcl policies to yaml + hcl_policies = f'p = {BOOLEAN_POLICIES}' + yaml_policies = yaml.dump(hcl2.loads(hcl_policies)['p']) + + yaml_file = tmp_path / 'policies.yaml' + yaml_file.write_text(yaml_policies) + + _, resources = plan_runner(org_policies_data_path=f'"{tmp_path}"') + validate_policy_boolean_resources(resources) + + +def test_policy_list_factory(plan_runner, tmp_path): + # convert hcl policies to yaml + hcl_policies = f'p = {LIST_POLICIES}' + yaml_policies = yaml.dump(hcl2.loads(hcl_policies)['p']) + + yaml_file = tmp_path / 'policies.yaml' + yaml_file.write_text(yaml_policies) + + _, resources = plan_runner(org_policies_data_path=f'"{tmp_path}"') + validate_policy_list_resources(resources) + + +def validate_policy_boolean_resources(resources): + assert len(resources) == 2 + policies = [r for r in resources if r['type'] == 'google_org_policy_policy'] + assert len(policies) == 2 + assert all( + x['values']['parent'] == 'organizations/1234567890' for x in policies) + + p1 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.disableServiceAccountKeyCreation' + ][0] + + assert p1['inherit_from_parent'] is None + assert p1['reset'] is None + assert p1['rules'] == [{ + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': 'TRUE', + 'values': [] + }] + + p2 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.disableServiceAccountKeyUpload' + ][0] + + assert p2['inherit_from_parent'] is None + assert p2['reset'] is None + assert len(p2['rules']) == 2 + assert p2['rules'][0] == { + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': 'FALSE', + 'values': [] + } + assert p2['rules'][1] == { + 'allow_all': None, + 'condition': [{ + 'description': 'test condition', + 'expression': 'resource.matchTagId(aa, bb)', + 'location': 'xxx', + 'title': 'condition' + }], + 'deny_all': None, + 'enforce': 'TRUE', + 'values': [] + } + + +def validate_policy_list_resources(resources): + assert len(resources) == 3 + + policies = [r for r in resources if r['type'] == 'google_org_policy_policy'] + assert len(policies) == 3 + assert all( + x['values']['parent'] == 'organizations/1234567890' for x in policies) + + p1 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'compute.vmExternalIpAccess' + ][0] + assert p1['inherit_from_parent'] is None + assert p1['reset'] is None + assert p1['rules'] == [{ + 'allow_all': None, + 'condition': [], + 'deny_all': 'TRUE', + 'enforce': None, + 'values': [] + }] + + p2 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.allowedPolicyMemberDomains' + ][0] + assert p2['inherit_from_parent'] is None + assert p2['reset'] is None + assert p2['rules'] == [{ + 'allow_all': + None, + 'condition': [], + 'deny_all': + None, + 'enforce': + None, + 'values': [{ + 'allowed_values': [ + 'C0xxxxxxx', + 'C0yyyyyyy', + ], + 'denied_values': None + }] + }] + + p3 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'compute.restrictLoadBalancerCreationForTypes' + ][0] + assert p3['inherit_from_parent'] is None + assert p3['reset'] is None + assert len(p3['rules']) == 3 + assert p3['rules'][0] == { + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': None, + 'values': [{ + 'allowed_values': None, + 'denied_values': ['in:EXTERNAL'] + }] + } + + assert p3['rules'][1] == { + 
'allow_all': None, + 'condition': [{ + 'description': 'test condition', + 'expression': 'resource.matchTagId(aa, bb)', + 'location': 'xxx', + 'title': 'condition' + }], + 'deny_all': None, + 'enforce': None, + 'values': [{ + 'allowed_values': ['EXTERNAL_1'], + 'denied_values': None + }] + } + + assert p3['rules'][2] == { + 'allow_all': 'TRUE', + 'condition': [{ + 'description': 'test condition2', + 'expression': 'resource.matchTagId(cc, dd)', + 'location': 'xxx', + 'title': 'condition2' + }], + 'deny_all': None, + 'enforce': None, + 'values': [] + } + + +def test_policy_implementation(plan_runner): + '''Verify org policy implementation is the same (except minor + differences) in the organization, folder and project modules.''' + + modules_path = Path(__file__).parents[3] / 'modules' + lines = {} + for module in ['project', 'folder', 'organization']: + path = modules_path / module / 'organization-policies.tf' + lines[module] = path.open().readlines() + + diff1 = difflib.unified_diff(lines['project'], lines['folder']) + assert list(diff1) == [ + '--- \n', + '+++ \n', + '@@ -14,7 +14,7 @@\n', + ' * limitations under the License.\n', + ' */\n', + ' \n', + '-# tfdoc:file:description Project-level organization policies.\n', + '+# tfdoc:file:description Folder-level organization policies.\n', + ' \n', + ' locals {\n', + ' _factory_data_raw = (\n', + '@@ -69,8 +69,8 @@\n', + ' org_policies = {\n', + ' for k, v in local._org_policies :\n', + ' k => merge(v, {\n', + '- name = "projects/${local.project.project_id}/policies/${k}"\n', + '- parent = "projects/${local.project.project_id}"\n', + '+ name = "${local.folder.name}/policies/${k}"\n', + '+ parent = local.folder.name\n', + ' \n', + ' is_boolean_policy = v.allow == null && v.deny == null\n', + ' has_values = (\n', + ] + + diff2 = difflib.unified_diff(lines['folder'], lines['organization']) + assert list(diff2) == [ + '--- \n', + '+++ \n', + '@@ -14,7 +14,7 @@\n', + ' * limitations under the License.\n', + ' */\n', + ' \n', + '-# tfdoc:file:description Folder-level organization policies.\n', + '+# tfdoc:file:description Organization-level organization policies.\n', + ' \n', + ' locals {\n', + ' _factory_data_raw = (\n', + '@@ -69,8 +69,8 @@\n', + ' org_policies = {\n', + ' for k, v in local._org_policies :\n', + ' k => merge(v, {\n', + '- name = "${local.folder.name}/policies/${k}"\n', + '- parent = local.folder.name\n', + '+ name = "${var.organization_id}/policies/${k}"\n', + '+ parent = var.organization_id\n', + ' \n', + ' is_boolean_policy = v.allow == null && v.deny == null\n', + ' has_values = (\n', + '@@ -143,4 +143,12 @@\n', + ' }\n', + ' }\n', + ' }\n', + '+\n', + '+ depends_on = [\n', + '+ google_organization_iam_audit_config.config,\n', + '+ google_organization_iam_binding.authoritative,\n', + '+ google_organization_iam_custom_role.roles,\n', + '+ google_organization_iam_member.additive,\n', + '+ google_organization_iam_policy.authoritative,\n', + '+ ]\n', + ' }\n', + ] diff --git a/tests/modules/organization_policy/fixture/policies/test.yaml b/tests/modules/organization_policy/fixture/policies/test.yaml deleted file mode 100644 index 4b81e524..00000000 --- a/tests/modules/organization_policy/fixture/policies/test.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2022 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -organizations/1234567890: - constraints/compute.vmExternalIpAccess: - rules: - - deny_all: true -folders/1234567890: - compute.vmCanIpForward: - inherit_from_parent: false - reset: false - rules: - - allow: [] -projects/my-project-id: - run.allowedIngress: - inherit_from_parent: true - rules: - - allow: ['internal'] - condition: - description: allow internal ingress - expression: resource.matchTag("123456789/environment", "prod") - location: test.log - title: allow-for-prod - iam.allowServiceAccountCredentialLifetimeExtension: - rules: - - deny: [] - compute.disableGlobalLoadBalancing: - reset: true diff --git a/tests/modules/organization_policy/fixture/variables.tf b/tests/modules/organization_policy/fixture/variables.tf deleted file mode 100644 index 8196bcff..00000000 --- a/tests/modules/organization_policy/fixture/variables.tf +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -variable "config_directory" { - description = "Paths to a folder where organization policy configs are stored in yaml format. Files suffix must be `.yaml`." - type = string - default = null -} - -variable "policies" { - description = "Organization policies keyed by parent in format `projects/project-id`, `folders/1234567890` or `organizations/1234567890`." - type = map(map(object({ - inherit_from_parent = optional(bool) # List policy only. - reset = optional(bool) - rules = optional( - list(object({ - allow = optional(list(string)) # List policy only. Stands for `allow_all` if set to empty list `[]` or to `values.allowed_values` if set to a list of values - deny = optional(list(string)) # List policy only. Stands for `deny_all` if set to empty list `[]` or to `values.denied_values` if set to a list of values - enforce = optional(bool) # Boolean policy only. - condition = optional( - object({ - description = optional(string) - expression = optional(string) - location = optional(string) - title = optional(string) - }) - ) - })) - ) - }))) - default = {} -} diff --git a/tests/modules/organization_policy/test_plan.py b/tests/modules/organization_policy/test_plan.py deleted file mode 100644 index fa7e5cd7..00000000 --- a/tests/modules/organization_policy/test_plan.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2022 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -def test_org_policy_simple(plan_runner): - "Test vpc with no extra options." - org_policies = ( - '{' - '"folders/1234567890" = {' - ' "constraints/iam.disableServiceAccountKeyUpload" = {' - ' rules = [' - ' {' - ' enforce = true,' - ' }' - ' ]' - ' }' - ' },' - ' "organizations/1234567890" = {' - ' "run.allowedIngress" = {' - ' rules = [' - ' {' - ' allow = ["internal"],' - ' condition = {' - ' description= "allow ingress",' - ' expression = "resource.matchTag(\'123456789/environment\', \'prod\')",' - ' title = "allow-for-prod-org"' - ' }' - ' }' - ' ]' - ' }' - ' }' - '}' - ) - _, resources = plan_runner( - policies = org_policies - ) - assert len(resources) == 2 - - org_policy = [r for r in resources if r["values"] - ["name"].endswith('iam.disableServiceAccountKeyUpload')][0]["values"] - assert org_policy["parent"] == "folders/1234567890" - assert org_policy["spec"][0]["rules"][0]["enforce"] == "TRUE" - - -def test_org_policy_factory(plan_runner): - "Test yaml based configuration" - _, resources = plan_runner( - config_directory="./policies", - ) - assert len(resources) == 5 - - org_policy = [r for r in resources if r["values"] - ["name"].endswith('run.allowedIngress')][0]["values"]["spec"][0] - assert org_policy["inherit_from_parent"] == True - assert org_policy["rules"][0]["condition"][0]["title"] == "allow-for-prod" - assert set(org_policy["rules"][0]["values"][0]["allowed_values"]) == set(["internal"]) - - -def test_combined_org_policy_config(plan_runner): - "Test combined (yaml, hcl) policy configuration" - org_policies = ( - '{' - '"folders/3456789012" = {' - ' "constraints/iam.disableServiceAccountKeyUpload" = {' - ' rules = [' - ' {' - ' enforce = true' - ' }' - ' ]' - ' }' - ' }' - '}' - ) - _, resources = plan_runner( - config_directory="./policies", - policies = org_policies - ) - - assert len(resources) == 6 diff --git a/tests/modules/project/fixture/main.tf b/tests/modules/project/fixture/main.tf index a9867e5d..08cf49dc 100644 --- a/tests/modules/project/fixture/main.tf +++ b/tests/modules/project/fixture/main.tf @@ -25,12 +25,12 @@ module "test" { iam_additive_members = var.iam_additive_members labels = var.labels lien_reason = var.lien_reason + org_policies = var.org_policies + org_policies_data_path = var.org_policies_data_path oslogin = var.oslogin oslogin_admins = var.oslogin_admins oslogin_users = var.oslogin_users parent = var.parent - policy_boolean = var.policy_boolean - policy_list = var.policy_list prefix = var.prefix service_encryption_key_ids = var.service_encryption_key_ids services = var.services @@ -63,4 +63,3 @@ module "test-svpc-service" { } } } - diff --git a/tests/modules/project/fixture/variables.tf b/tests/modules/project/fixture/variables.tf index 2a4d95d1..93843396 100644 --- a/tests/modules/project/fixture/variables.tf +++ b/tests/modules/project/fixture/variables.tf @@ -64,6 +64,16 @@ variable "lien_reason" { default = "" } +variable "org_policies" { + type = any + default = {} +} + +variable "org_policies_data_path" { + type = any + default = null +} + variable "oslogin" { type = bool default = false @@ -84,21 +94,6 @@ variable 
"parent" { default = null } -variable "policy_boolean" { - type = map(bool) - default = {} -} - -variable "policy_list" { - type = map(object({ - inherit_from_parent = bool - suggested_value = string - status = bool - values = list(string) - })) - default = {} -} - variable "prefix" { type = string default = null diff --git a/tests/modules/project/test_plan_org_policies.py b/tests/modules/project/test_plan_org_policies.py index 645db0df..28494467 100644 --- a/tests/modules/project/test_plan_org_policies.py +++ b/tests/modules/project/test_plan_org_policies.py @@ -12,47 +12,246 @@ # See the License for the specific language governing permissions and # limitations under the License. +import hcl2 +import yaml + +BOOLEAN_POLICIES = '''{ + "iam.disableServiceAccountKeyCreation" = { + enforce = true + } + "iam.disableServiceAccountKeyUpload" = { + enforce = false + rules = [ + { + condition = { + expression = "resource.matchTagId(aa, bb)" + title = "condition" + description = "test condition" + location = "xxx" + } + enforce = true + } + ] + } +}''' + +LIST_POLICIES = '''{ + "compute.vmExternalIpAccess" = { + deny = { all = true } + } + "iam.allowedPolicyMemberDomains" = { + allow = { + values = ["C0xxxxxxx", "C0yyyyyyy"] + } + } + "compute.restrictLoadBalancerCreationForTypes" = { + deny = { values = ["in:EXTERNAL"] } + rules = [ + { + condition = { + expression = "resource.matchTagId(aa, bb)" + title = "condition" + description = "test condition" + location = "xxx" + } + allow = { + values = ["EXTERNAL_1"] + } + }, + { + condition = { + expression = "resource.matchTagId(cc, dd)" + title = "condition2" + description = "test condition2" + location = "xxx" + } + allow = { + all = true + } + } + ] + } +}''' + + def test_policy_boolean(plan_runner): "Test boolean org policy." - policy_boolean = '{policy-a = true, policy-b = false, policy-c = null}' - _, resources = plan_runner(policy_boolean=policy_boolean) - assert len(resources) == 7 - resources = [r for r in resources if r['type'] - == 'google_project_organization_policy'] - assert sorted([r['index'] for r in resources]) == [ - 'policy-a', 'policy-b', 'policy-c' - ] - policy_values = [] - for resource in resources: - for policy in ('boolean_policy', 'restore_policy'): - value = resource['values'][policy] - if value: - policy_values.append((policy,) + value[0].popitem()) - assert sorted(policy_values) == [ - ('boolean_policy', 'enforced', False), - ('boolean_policy', 'enforced', True), - ('restore_policy', 'default', True) - ] + _, resources = plan_runner(org_policies=BOOLEAN_POLICIES) + validate_policy_boolean_resources(resources) def test_policy_list(plan_runner): "Test list org policy." 
- policy_list = ( - '{' - 'policy-a = {inherit_from_parent = true, suggested_value = null, status = true, values = []}, ' - 'policy-b = {inherit_from_parent = null, suggested_value = "foo", status = false, values = ["bar"]}, ' - 'policy-c = {inherit_from_parent = null, suggested_value = true, status = null, values = null}' - '}' - ) - _, resources = plan_runner(policy_list=policy_list) + _, resources = plan_runner(org_policies=LIST_POLICIES) + validate_policy_list_resources(resources) + + +def test_policy_boolean_factory(plan_runner, tmp_path): + # convert hcl policies to yaml + hcl_policies = f'p = {BOOLEAN_POLICIES}' + yaml_policies = yaml.dump(hcl2.loads(hcl_policies)['p']) + + yaml_file = tmp_path / 'policies.yaml' + yaml_file.write_text(yaml_policies) + + _, resources = plan_runner(org_policies_data_path=f'"{tmp_path}"') + validate_policy_boolean_resources(resources) + + +def test_policy_list_factory(plan_runner, tmp_path): + # convert hcl policies to yaml + hcl_policies = f'p = {LIST_POLICIES}' + yaml_policies = yaml.dump(hcl2.loads(hcl_policies)['p']) + + yaml_file = tmp_path / 'policies.yaml' + yaml_file.write_text(yaml_policies) + + _, resources = plan_runner(org_policies_data_path=f'"{tmp_path}"') + validate_policy_list_resources(resources) + + +def validate_policy_boolean_resources(resources): + assert len(resources) == 6 + policies = [r for r in resources if r['type'] == 'google_org_policy_policy'] + assert len(policies) == 2 + assert all(x['values']['parent'] == 'projects/my-project' for x in policies) + + p1 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.disableServiceAccountKeyCreation' + ][0] + + assert p1['inherit_from_parent'] is None + assert p1['reset'] is None + assert p1['rules'] == [{ + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': 'TRUE', + 'values': [] + }] + + p2 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'iam.disableServiceAccountKeyUpload' + ][0] + + assert p2['inherit_from_parent'] is None + assert p2['reset'] is None + assert len(p2['rules']) == 2 + assert p2['rules'][0] == { + 'allow_all': None, + 'condition': [], + 'deny_all': None, + 'enforce': 'FALSE', + 'values': [] + } + assert p2['rules'][1] == { + 'allow_all': None, + 'condition': [{ + 'description': 'test condition', + 'expression': 'resource.matchTagId(aa, bb)', + 'location': 'xxx', + 'title': 'condition' + }], + 'deny_all': None, + 'enforce': 'TRUE', + 'values': [] + } + + +def validate_policy_list_resources(resources): assert len(resources) == 7 - values = [r['values'] for r in resources if r['type'] - == 'google_project_organization_policy'] - assert [r['constraint'] for r in values] == [ - 'policy-a', 'policy-b', 'policy-c' - ] - assert values[0]['list_policy'][0]['allow'] == [ - {'all': True, 'values': None}] - assert values[1]['list_policy'][0]['deny'] == [ - {'all': False, 'values': ["bar"]}] - assert values[2]['restore_policy'] == [{'default': True}] + + policies = [r for r in resources if r['type'] == 'google_org_policy_policy'] + assert len(policies) == 3 + assert all(x['values']['parent'] == 'projects/my-project' for x in policies) + + p1 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 'compute.vmExternalIpAccess' + ][0] + assert p1['inherit_from_parent'] is None + assert p1['reset'] is None + assert p1['rules'] == [{ + 'allow_all': None, + 'condition': [], + 'deny_all': 'TRUE', + 'enforce': None, + 'values': [] + }] + + p2 = [ + r['values']['spec'][0] + for r in policies + if r['index'] == 
'iam.allowedPolicyMemberDomains'
+  ][0]
+  assert p2['inherit_from_parent'] is None
+  assert p2['reset'] is None
+  assert p2['rules'] == [{
+      'allow_all':
+          None,
+      'condition': [],
+      'deny_all':
+          None,
+      'enforce':
+          None,
+      'values': [{
+          'allowed_values': [
+              'C0xxxxxxx',
+              'C0yyyyyyy',
+          ],
+          'denied_values': None
+      }]
+  }]
+
+  p3 = [
+      r['values']['spec'][0]
+      for r in policies
+      if r['index'] == 'compute.restrictLoadBalancerCreationForTypes'
+  ][0]
+  assert p3['inherit_from_parent'] is None
+  assert p3['reset'] is None
+  assert len(p3['rules']) == 3
+  assert p3['rules'][0] == {
+      'allow_all': None,
+      'condition': [],
+      'deny_all': None,
+      'enforce': None,
+      'values': [{
+          'allowed_values': None,
+          'denied_values': ['in:EXTERNAL']
+      }]
+  }
+
+  assert p3['rules'][1] == {
+      'allow_all': None,
+      'condition': [{
+          'description': 'test condition',
+          'expression': 'resource.matchTagId(aa, bb)',
+          'location': 'xxx',
+          'title': 'condition'
+      }],
+      'deny_all': None,
+      'enforce': None,
+      'values': [{
+          'allowed_values': ['EXTERNAL_1'],
+          'denied_values': None
+      }]
+  }
+
+  assert p3['rules'][2] == {
+      'allow_all': 'TRUE',
+      'condition': [{
+          'description': 'test condition2',
+          'expression': 'resource.matchTagId(cc, dd)',
+          'location': 'xxx',
+          'title': 'condition2'
+      }],
+      'deny_all': None,
+      'enforce': None,
+      'values': []
+  }
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 931b1730..3eb583ab 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,6 +1,6 @@
 pytest>=6.2.5
-pytest-xdist
 PyYAML>=6.0
-tftest>=1.6.3
+tftest>=1.7.6
 marko>=1.2.0
 deepdiff>=5.7.0
+python-hcl2>=3.0.5
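
Note: the new *_factory tests above exercise the YAML factory path of the org-policy refactor by parsing the HCL fixtures with python-hcl2 (added to tests/requirements.txt) and dumping them as YAML into a temporary directory, which is then passed to the module via org_policies_data_path. The following is a minimal standalone sketch of that conversion; the sample constraint and the local "policies" output directory are illustrative, while the real tests reuse BOOLEAN_POLICIES/LIST_POLICIES and write into pytest's tmp_path.

import pathlib

import hcl2  # provided by python-hcl2>=3.0.5
import yaml

# Illustrative policy body; the tests use their BOOLEAN_POLICIES / LIST_POLICIES constants.
HCL_POLICIES = '''{
  "iam.disableServiceAccountKeyCreation" = {
    enforce = true
  }
}'''

# hcl2.loads() needs a complete HCL document, so wrap the object in an
# attribute and unwrap it after parsing, exactly as the tests do.
policies = hcl2.loads(f'p = {HCL_POLICIES}')['p']

# Dump the parsed policies as YAML where the org_policies_data_path factory
# can pick them up (one .yaml file per set of policies).
out_dir = pathlib.Path('policies')  # illustrative path, tmp_path in the tests
out_dir.mkdir(exist_ok=True)
(out_dir / 'policies.yaml').write_text(yaml.dump(policies))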