Merge pull request #1053 from GoogleCloudPlatform/jccb/examples-inventory

Extend inventory-based testing to examples
Julio Castillo 2022-12-18 20:50:33 +01:00 committed by GitHub
commit 95e015b1ec
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
160 changed files with 2325 additions and 2724 deletions

View File

@ -55,7 +55,7 @@ tf output -raw troubleshooting_payload
A monitoring dashboard can optionally be deployed in the same project by setting the `dashboard_json_path` variable to the path of a dashboard JSON file. A sample dashboard is included, and can be deployed with this variable configuration:
```hcl
```tfvars
dashboard_json_path = "../dashboards/quotas-utilization.json"
```
<!-- BEGIN TFDOC -->

View File

@ -7,7 +7,7 @@ This is a helper module to prepare GCP Credentials from Terraform Enterprise wor
module "tfe_oidc" {
source = "./tfc-oidc"
impersonate_service_account_email = "tfe-test@tfe-test-wif.iam.gserviceaccount.com"
impersonate_service_account_email = "tfe-test@tfe-test-wif.iam.gserviceaccount.com"
}
provider "google" {

View File

@ -11,9 +11,9 @@ Yaml abstraction for Groups can simplify groups creation and members management.
```hcl
module "prod-firewall" {
source = "./fabric/blueprints/factories/cloud-identity-group-factory"
customer_id = "customers/C0xxxxxxx"
data_dir = "data"
customer_id = "customers/C0xxxxxxx"
data_dir = "data"
}
# tftest skip
```
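As an illustration only (the file name and keys below are assumptions, not taken from this diff), a per-group data file in the `data` directory might look like this:

```yaml
# hypothetical data/gcp-data-viewers.yaml (schema assumed, not shown in this diff)
display_name: GCP data viewers
description: Read-only access group for data resources
members:
  - user1@example.com
  - user2@example.com
managers:
  - manager1@example.com
```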

View File

@ -14,14 +14,14 @@ Nested folder structure for yaml configurations is optionally supported, which a
module "prod-firewall" {
source = "./fabric/blueprints/factories/net-vpc-firewall-yaml"
project_id = "my-prod-project"
network = "my-prod-network"
project_id = "my-prod-project"
network = "my-prod-network"
config_directories = [
"./prod",
"./common"
]
log_config = {
log_config = {
metadata = "INCLUDE_ALL_METADATA"
}
}
@ -29,8 +29,8 @@ module "prod-firewall" {
module "dev-firewall" {
source = "./fabric/blueprints/factories/net-vpc-firewall-yaml"
project_id = "my-dev-project"
network = "my-dev-network"
project_id = "my-dev-project"
network = "my-dev-network"
config_directories = [
"./dev",
"./common"

View File

@ -49,8 +49,8 @@ locals {
trimsuffix(f, ".yaml") => yamldecode(file("${local._data_dir}/${f}"))
}
# these are usually set via variables
_base_dir = "./fabric/blueprints/factories/project-factory"
_data_dir = "${local._base_dir}/sample-data/projects/"
_base_dir = "./fabric/blueprints/factories/project-factory"
_data_dir = "${local._base_dir}/sample-data/projects/"
_defaults_file = "${local._base_dir}/sample-data/defaults.yaml"
}

View File

@ -78,7 +78,7 @@ module "gke-fleet" {
location = "europe-west1"
private_cluster_config = local.cluster_defaults.private_cluster_config
vpc_config = {
subnetwork = local.subnet_self_links.ew1
subnetwork = local.subnet_self_links.ew1
master_ipv4_cidr_block = "172.16.10.0/28"
}
}
@ -86,7 +86,7 @@ module "gke-fleet" {
location = "europe-west3"
private_cluster_config = local.cluster_defaults.private_cluster_config
vpc_config = {
subnetwork = local.subnet_self_links.ew3
subnetwork = local.subnet_self_links.ew3
master_ipv4_cidr_block = "172.16.20.0/28"
}
}
@ -95,16 +95,16 @@ module "gke-fleet" {
cluster-0 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
spot = true
spot = true
}
}
}
cluster-1 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
}
}
@ -143,13 +143,13 @@ module "gke" {
prefix = "myprefix"
clusters = {
cluster-0 = {
location = "europe-west1"
location = "europe-west1"
vpc_config = {
subnetwork = local.subnet_self_links.ew1
}
}
cluster-1 = {
location = "europe-west3"
location = "europe-west3"
vpc_config = {
subnetwork = local.subnet_self_links.ew3
}
@ -159,16 +159,16 @@ module "gke" {
cluster-0 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
spot = true
spot = true
}
}
}
cluster-1 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
}
}
@ -205,14 +205,14 @@ module "gke" {
enable_hierarchical_resource_quota = true
enable_pod_tree_labels = true
}
policy_controller = {
policy_controller = {
audit_interval_seconds = 30
exemptable_namespaces = ["kube-system"]
log_denies_enabled = true
referential_rules_enabled = true
template_library_installed = true
}
version = "1.10.2"
version = "1.10.2"
}
}
fleet_configmanagement_clusters = {

View File

@ -35,12 +35,12 @@ You can easily create such a project by turning on project creation i
```hcl
module "project" {
source = "../../../modules/project"
name = var.project_id
source = "../../../modules/project"
name = var.project_id
# comment or remove this line to enable project creation
# project_create = false
# add the following line with your billing account id value
billing_account = "12345-ABCD-12345"
billing_account = "12345-ABCD-12345"
services = [
"compute.googleapis.com",
"dns.googleapis.com"

View File

@ -34,7 +34,7 @@ The `repositories` variable is where you configure which repositories to create,
This is an example that creates repositories for stages 00 and 01, defines an existing repository as the source for modules, and populates initial files for stages 00, 01, and 02:
```hcl
```tfvars
organization = "ludomagno"
repositories = {
fast_00_bootstrap = {

View File

@ -226,7 +226,7 @@ Alongside the GCS stored files, you can also configure a second copy to be saved
This second set of files is disabled by default; you can enable it by setting the `outputs_location` variable to a valid path on a local filesystem, e.g.
```hcl
```tfvars
outputs_location = "~/fast-config"
```
@ -297,10 +297,11 @@ variable "groups" {
description = "Group names to grant organization-level permissions."
type = map(string)
default = {
gcp-network-admins = "net-rockstars"
gcp-network-admins = "net-rockstars"
# [...]
}
}
# tftest skip
```
If your groups layout differs substantially from the checklist, define all relevant groups in the `groups` variable, then rearrange IAM roles in the code to match your setup.
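For example, a minimal `tfvars` override could look like the sketch below; group names are placeholders, and only `gcp-network-admins` is shown in the snippet above, so the remaining keys must match those actually defined in `variables.tf`:

```tfvars
# hypothetical override; key names must match the groups variable definition
groups = {
  gcp-network-admins = "network-admins"
  # [...]
}
```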
@ -359,7 +360,7 @@ Provider key names are used by the `cicd_repositories` variable to configure aut
This is a sample configuration of a GitHub and a Gitlab provider; the `attribute_condition` attribute can use any of the mapped attributes for the provider (refer to the `identity-providers.tf` file for the full list) or be set to `null` if not needed:
```hcl
```tfvars
federated_identity_providers = {
github-sample = {
attribute_condition = "attribute.repository_owner==\"my-github-org\""
@ -374,9 +375,9 @@ federated_identity_providers = {
gitlab-ce-sample = {
attribute_condition = "attribute.namespace_path==\"my-gitlab-org\""
issuer = "gitlab"
custom_settings = {
issuer_uri = "https://gitlab.fast.example.com"
allowed_audiences = ["https://gitlab.fast.example.com"]
custom_settings = {
issuer_uri = "https://gitlab.fast.example.com"
allowed_audiences = ["https://gitlab.fast.example.com"]
}
}
}
@ -390,7 +391,7 @@ The repository design we support is fairly simple, with a repository for modules
This is an example of configuring the bootstrap and resource management repositories in this stage. CI/CD configuration is optional, so the entire variable or any of its attributes can be set to null if not needed.
```hcl
```tfvars
cicd_repositories = {
bootstrap = {
branch = null

View File

@ -109,7 +109,7 @@ This stage provides a single built-in customization that offers a minimal (but u
Consider the following example in a `tfvars` file:
```hcl
```tfvars
team_folders = {
team-a = {
descriptive_name = "Team A"

View File

@ -114,7 +114,7 @@ To support these scenarios, key IAM bindings are configured by default to be add
An example of how to configure keys:
```hcl
```tfvars
# terraform.tfvars
kms_defaults = {
@ -128,14 +128,14 @@ kms_keys = {
"user:user1@example.com"
]
}
labels = { service = "compute" }
locations = null
labels = { service = "compute" }
locations = null
rotation_period = null
}
storage = {
iam = null
labels = { service = "compute" }
locations = ["europe"]
iam = null
labels = { service = "compute" }
locations = ["europe"]
rotation_period = null
}
}
@ -162,7 +162,7 @@ The VPC SC configuration is set up by default in dry-run mode to allow easy expe
Access levels are defined via the `vpc_sc_access_levels` variable, and referenced by key in perimeter definitions:
```hcl
```tfvars
vpc_sc_access_levels = {
onprem = {
conditions = [{
@ -176,7 +176,7 @@ vpc_sc_access_levels = {
Ingress and egress policy are defined via the `vpc_sc_egress_policies` and `vpc_sc_ingress_policies`, and referenced by key in perimeter definitions:
```hcl
```tfvars
vpc_sc_egress_policies = {
iac-gcs = {
from = {
@ -187,7 +187,7 @@ vpc_sc_egress_policies = {
to = {
operations = [{
method_selectors = ["*"]
service_name = "storage.googleapis.com"
service_name = "storage.googleapis.com"
}]
resources = ["projects/123456782"]
}
@ -217,7 +217,7 @@ Support for independently adding projects to perimeters outside of this Terrafor
Access levels and egress/ingress policies are referenced in perimeters via keys.
```hcl
```tfvars
vpc_sc_perimeters = {
dev = {
egress_policies = ["iac-gcs"]

View File

@ -7,11 +7,11 @@ Note: this module will be integrated into a general-purpose load balancing module i
## Example
```hcl
module "neg" {
source = "./fabric/modules/net-neg"
source = "./fabric/modules/__experimental/net-neg/"
project_id = "myproject"
name = "myneg"
network = module.vpc.self_link
subnetwork = module.vpc.subnet_self_links["europe-west1/default"]
network = var.vpc.self_link
subnetwork = var.subnet.self_link
zone = "europe-west1-b"
endpoints = [
for instance in module.vm.instances :
@ -22,6 +22,7 @@ module "neg" {
}
]
}
# tftest skip
```
<!-- BEGIN TFDOC -->

View File

@ -6,11 +6,11 @@ This module allows creating an API with its associated API config and API gatewa
## Basic example
```hcl
module "gateway" {
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
# The OpenAPI spec contents
# ...
EOT
@ -31,7 +31,7 @@ module "gateway" {
EOT
service_account_email = "sa@my-project.iam.gserviceaccount.com"
iam = {
"roles/apigateway.admin" = [ "user:user@example.com" ]
"roles/apigateway.admin" = ["user:user@example.com"]
}
}
# tftest modules=1 resources=7
@ -40,18 +40,18 @@ module "gateway" {
## Basic example + service account creation
```hcl
module "gateway" {
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
# The OpenAPI spec contents
# ...
EOT
service_account_create = true
iam = {
"roles/apigateway.admin" = [ "user:mirene@google.com" ]
"roles/apigateway.viewer" = [ "user:mirene@google.com" ]
"roles/apigateway.admin" = ["user:mirene@google.com"]
"roles/apigateway.viewer" = ["user:mirene@google.com"]
}
}
# tftest modules=1 resources=11

View File

@ -25,14 +25,14 @@ module "apigee" {
}
environments = {
apis-test = {
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
}
apis-prod = {
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
iam = {
"roles/viewer" = ["group:devops@myorg.com"]
}
@ -71,10 +71,10 @@ module "apigee" {
source = "./fabric/modules/apigee"
project_id = "my-project"
organization = {
display_name = "My Organization"
description = "My Organization"
runtime_type = "HYBRID"
analytics_region = "europe-west1"
display_name = "My Organization"
description = "My Organization"
runtime_type = "HYBRID"
analytics_region = "europe-west1"
}
envgroups = {
test = ["test.example.com"]
@ -82,14 +82,14 @@ module "apigee" {
}
environments = {
apis-test = {
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
}
apis-prod = {
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
iam = {
"roles/viewer" = ["group:devops@myorg.com"]
}
@ -120,9 +120,9 @@ module "apigee" {
project_id = "my-project"
environments = {
apis-test = {
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
}
}
}

View File

@ -21,7 +21,7 @@ The access variables are split into `access` and `access_identities` variables,
module "bigquery-dataset" {
source = "./fabric/modules/bigquery-dataset"
project_id = "my-project"
id = "my-dataset"
id = "my-dataset"
access = {
reader-group = { role = "READER", type = "group" }
owner = { role = "OWNER", type = "user" }
@ -46,7 +46,7 @@ Access configuration can also be specified via IAM instead of basic roles via th
module "bigquery-dataset" {
source = "./fabric/modules/bigquery-dataset"
project_id = "my-project"
id = "my-dataset"
id = "my-dataset"
iam = {
"roles/bigquery.dataOwner" = ["user:user1@example.org"]
}

View File

@ -16,19 +16,19 @@ This module allows managing a single BigTable instance, including access configu
```hcl
module "bigtable-instance" {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-west1-b"
tables = {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-west1-b"
tables = {
test1 = null,
test2 = {
split_keys = ["a", "b", "c"]
column_family = null
}
}
iam = {
iam = {
"roles/bigtable.user" = ["user:viewer@testdomain.com"]
}
}
@ -59,11 +59,11 @@ If you use autoscaling, you should not set the variable `num_nodes`.
```hcl
module "bigtable-instance" {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-b"
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-b"
autoscaling_config = {
min_nodes = 3
max_nodes = 7
@ -78,12 +78,12 @@ module "bigtable-instance" {
```hcl
module "bigtable-instance" {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-a"
storage_type = "SSD"
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-a"
storage_type = "SSD"
autoscaling_config = {
min_nodes = 3
max_nodes = 7

View File

@ -29,7 +29,7 @@ module "budget" {
]
email_recipients = {
project_id = "my-project"
emails = ["user@example.com"]
emails = ["user@example.com"]
}
}
# tftest modules=1 resources=2

View File

@ -8,8 +8,8 @@ This module simplifies the creation of a Binary Authorization policy, attestors
```hcl
module "binauthz" {
source = "./fabric/modules/binauthz"
project_id = "my_project"
source = "./fabric/modules/binauthz"
project_id = "my_project"
global_policy_evaluation_mode = "DISABLE"
default_admission_rule = {
evaluation_mode = "ALWAYS_DENY"
@ -18,16 +18,16 @@ module "binauthz" {
}
cluster_admission_rules = {
"europe-west1-c.cluster" = {
evaluation_mode = "REQUIRE_ATTESTATION"
enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG"
attestors = [ "test" ]
evaluation_mode = "REQUIRE_ATTESTATION"
enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG"
attestors = ["test"]
}
}
attestors_config = {
"test": {
note_reference = null
pgp_public_keys = [
<<EOT
"test" : {
note_reference = null
pgp_public_keys = [
<<EOT
mQENBFtP0doBCADF+joTiXWKVuP8kJt3fgpBSjT9h8ezMfKA4aXZctYLx5wslWQl
bB7Iu2ezkECNzoEeU7WxUe8a61pMCh9cisS9H5mB2K2uM4Jnf8tgFeXn3akJDVo0
oR1IC+Dp9mXbRSK3MAvKkOwWlG99sx3uEdvmeBRHBOO+grchLx24EThXFOyP9Fk6
@ -44,11 +44,11 @@ module "binauthz" {
qoIRW6y0+UlAc+MbqfL0ziHDOAmcqz1GnROg
=6Bvm
EOT
]
pkix_public_keys = null
iam = {
"roles/viewer" = ["user:user1@my_org.com"]
}
]
pkix_public_keys = null
iam = {
"roles/viewer" = ["user:user1@my_org.com"]
}
}
}
}

View File

@ -24,7 +24,7 @@ This example will create a `cloud-config` that uses the module's defaults, creat
```hcl
module "cos-coredns" {
source = "./fabric/modules/cloud-config-container/coredns"
source = "./fabric/modules/cloud-config-container/coredns"
}
module "vm" {
@ -56,7 +56,7 @@ This example will create a `cloud-config` using a custom CoreDNS configuration,
```hcl
module "cos-coredns" {
source = "./fabric/modules/cloud-config-container/coredns"
source = "./fabric/modules/cloud-config-container/coredns"
coredns_config = "./fabric/modules/cloud-config-container/coredns/Corefile-hosts"
files = {
"/etc/coredns/example.hosts" = {
@ -64,7 +64,7 @@ module "cos-coredns" {
owner = null
permissions = "0644"
}
}
}
}
# tftest modules=0 resources=0
```

View File

@ -12,7 +12,7 @@ This example will create a `cloud-config` that starts [Envoy Proxy](https://www.
```hcl
module "cos-envoy" {
source = "./fabric/modules/cloud-config-container/cos-generic-metadata"
source = "./fabric/modules/cloud-config-container/cos-generic-metadata"
container_image = "envoyproxy/envoy:v1.14.1"
container_name = "envoy"
container_args = "-c /etc/envoy/envoy.yaml --log-level info --allow-unknown-static-fields"

View File

@ -62,7 +62,7 @@ module "cos-mysql" {
source = "./fabric/modules/cloud-config-container/mysql"
mysql_config = "./my.cnf"
mysql_password = "CiQAsd7WY=="
kms_config = {
kms_config = {
project_id = "my-project"
keyring = "test-cos"
location = "europe-west1"

View File

@ -24,7 +24,7 @@ This example will create a `cloud-config` that uses the module's defaults, creat
```hcl
module "cos-nginx" {
source = "./fabric/modules/cloud-config-container/nginx"
source = "./fabric/modules/cloud-config-container/nginx"
}
module "vm-nginx-tls" {

View File

@ -24,9 +24,9 @@ This example will create a `cloud-config` that allows any client in the 10.0.0.0
```hcl
module "cos-squid" {
source = "./fabric/modules/cloud-config-container/squid"
allow = [".github.com"]
clients = ["10.0.0.0/8"]
source = "./fabric/modules/cloud-config-container/squid"
allow = [".github.com"]
clients = ["10.0.0.0/8"]
}
module "vm" {

View File

@ -16,10 +16,10 @@ This deploys a Cloud Function with an HTTP endpoint, using a pre-existing GCS bu
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -31,11 +31,11 @@ module "cf-http" {
Analogous example using 2nd generation Cloud Functions
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
v2 = true
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
v2 = true
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -111,15 +111,15 @@ To allow anonymous access to the function, grant the `roles/cloudfunctions.invok
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
}
iam = {
iam = {
"roles/cloudfunctions.invoker" = ["allUsers"]
}
}
@ -132,15 +132,15 @@ You can have the module auto-create the GCS bucket used for deployment via the `
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bucket_config = {
lifecycle_delete_age_days = 1
}
bundle_config = {
source_dir = "fabric/assets/"
source_dir = "fabric/assets/"
}
}
# tftest modules=1 resources=3
@ -152,10 +152,10 @@ To use a custom service account managed by the module, set `service_account_crea
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -169,10 +169,10 @@ To use an externally managed service account, pass its email in `service_account
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -188,10 +188,10 @@ In order to help prevent `archive_zip.output_md5` from changing cross platform (
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets"
output_path = "bundle.zip"
@ -207,10 +207,10 @@ This deploys a Cloud Function with an HTTP endpoint, using a pre-existing GCS bu
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
build_worker_pool = "projects/my-project/locations/europe-west1/workerPools/my_build_worker_pool"
bundle_config = {
source_dir = "fabric/assets"

View File

@ -46,7 +46,7 @@ module "group" {
]
managers = [
"user3@example.com"
]
]
}
# tftest modules=1 resources=5
```

View File

@ -14,18 +14,18 @@ module "cloud_run" {
project_id = "my-project"
name = "hello"
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = {
command = null
args = null
env = {
"VAR1": "VALUE1",
"VAR2": "VALUE2",
env = {
"VAR1" : "VALUE1",
"VAR2" : "VALUE2",
}
env_from = null
}
ports = null
resources = null
ports = null
resources = null
volume_mounts = null
}]
}
@ -42,18 +42,18 @@ module "cloud_run" {
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = {
command = null
args = null
env = null
env_from = {
"CREDENTIALS": {
command = null
args = null
env = null
env_from = {
"CREDENTIALS" : {
name = "credentials"
key = "1"
key = "1"
}
}
}
ports = null
resources = null
ports = null
resources = null
volume_mounts = null
}]
}
@ -64,26 +64,26 @@ module "cloud_run" {
```hcl
module "cloud_run" {
source = "./fabric/modules/cloud-run"
project_id = var.project_id
name = "hello"
region = var.region
source = "./fabric/modules/cloud-run"
project_id = var.project_id
name = "hello"
region = var.region
revision_name = "green"
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = null
ports = null
resources = null
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = null
ports = null
resources = null
volume_mounts = {
"credentials": "/credentials"
"credentials" : "/credentials"
}
}]
volumes = [
{
name = "credentials"
name = "credentials"
secret_name = "credentials"
items = [{
key = "1"
key = "1"
path = "v1.txt"
}]
}
@ -98,9 +98,9 @@ This deploys a Cloud Run service with traffic split between two revisions.
```hcl
module "cloud_run" {
source = "./fabric/modules/cloud-run"
project_id = "my-project"
name = "hello"
source = "./fabric/modules/cloud-run"
project_id = "my-project"
name = "hello"
revision_name = "green"
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
@ -110,7 +110,7 @@ module "cloud_run" {
volume_mounts = null
}]
traffic = {
"blue" = 25
"blue" = 25
"green" = 75
}
}
@ -159,8 +159,8 @@ module "cloud_run" {
}]
audit_log_triggers = [
{
service_name = "cloudresourcemanager.googleapis.com"
method_name = "SetIamPolicy"
service_name = "cloudresourcemanager.googleapis.com"
method_name = "SetIamPolicy"
}
]
}

View File

@ -88,7 +88,7 @@ module "db" {
# generate a password for user1
user1 = null
# assign a password to user2
user2 = "mypassword"
user2 = "mypassword"
}
}
# tftest modules=1 resources=6

View File

@ -243,9 +243,9 @@ module "nginx-mig" {
target_size = 3
instance_template = module.nginx-template.template.self_link
update_policy = {
minimal_action = "REPLACE"
type = "PROACTIVE"
min_ready_sec = 30
minimal_action = "REPLACE"
type = "PROACTIVE"
min_ready_sec = 30
max_surge = {
fixed = 1
}
@ -393,8 +393,8 @@ module "nginx-mig" {
stateful_config = {
# name needs to match a MIG instance name
instance-1 = {
minimal_action = "NONE",
most_disruptive_allowed_action = "REPLACE"
minimal_action = "NONE",
most_disruptive_allowed_action = "REPLACE"
preserved_state = {
disks = {
persistent-disk-1 = {

View File

@ -110,7 +110,7 @@ module "simple-vm-example" {
}
}]
service_account_create = true
create_template = true
create_template = true
}
# tftest modules=1 resources=2
```
@ -131,8 +131,8 @@ module "kms-vm-example" {
}]
attached_disks = [
{
name = "attached-disk"
size = 10
name = "attached-disk"
size = 10
}
]
service_account_create = true
@ -176,9 +176,9 @@ This example shows how to enable [gVNIC](https://cloud.google.com/compute/docs/n
```hcl
resource "google_compute_image" "cos-gvnic" {
project = "my-project"
name = "my-image"
source_image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-89-16108-534-18"
project = "my-project"
name = "my-image"
source_image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-89-16108-534-18"
guest_os_features {
type = "GVNIC"
@ -200,8 +200,8 @@ module "vm-with-gvnic" {
zone = "europe-west1-b"
name = "test"
boot_disk = {
image = google_compute_image.cos-gvnic.self_link
type = "pd-ssd"
image = google_compute_image.cos-gvnic.self_link
type = "pd-ssd"
}
network_interfaces = [{
network = var.vpc.self_link

View File

@ -12,7 +12,7 @@ module "cmn-dc" {
source = "./fabric/modules/data-catalog-policy-tag"
name = "my-datacatalog-policy-tags"
project_id = "my-project"
tags = {
tags = {
low = null, medium = null, high = null
}
}
@ -26,10 +26,10 @@ module "cmn-dc" {
source = "./fabric/modules/data-catalog-policy-tag"
name = "my-datacatalog-policy-tags"
project_id = "my-project"
tags = {
low = null
tags = {
low = null
medium = null
high = {"roles/datacatalog.categoryFineGrainedReader" = ["group:GROUP_NAME@example.com"]}
high = { "roles/datacatalog.categoryFineGrainedReader" = ["group:GROUP_NAME@example.com"] }
}
iam = {
"roles/datacatalog.categoryAdmin" = ["group:GROUP_NAME@example.com"]

View File

@ -8,11 +8,11 @@ This module allows simple management of ['Google Data Fusion'](https://cloud.goo
```hcl
module "datafusion" {
source = "./fabric/modules/datafusion"
name = "my-datafusion"
region = "europe-west1"
project_id = "my-project"
network = "my-network-name"
source = "./fabric/modules/datafusion"
name = "my-datafusion"
region = "europe-west1"
project_id = "my-project"
network = "my-network-name"
# TODO: remove the following line
firewall_create = false
}

View File

@ -22,7 +22,7 @@ module "endpoint" {
```
```yaml
# tftest file openapi configs/endpoints/openapi.yaml
# tftest-file id=openapi path=configs/endpoints/openapi.yaml
swagger: "2.0"
info:
description: "A simple Google Cloud Endpoints API example."

View File

@ -10,11 +10,11 @@ This module allows the creation and management of folders, including support for
module "folder" {
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
group_iam = {
name = "Folder name"
group_iam = {
"cloud-owners@example.org" = [
"roles/owner",
"roles/resourcemanager.projectCreator"
"roles/owner",
"roles/resourcemanager.projectCreator"
]
}
iam = {
@ -32,7 +32,7 @@ To manage organization policies, the `orgpolicy.googleapis.com` service should b
module "folder" {
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
name = "Folder name"
org_policies = {
"compute.disableGuestAttributesAccess" = {
enforce = true
@ -85,9 +85,9 @@ In the same way as for the [organization](../organization) module, the in-built
```hcl
module "folder" {
source = "./fabric/modules/folder"
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
name = "Folder name"
firewall_policy_factory = {
cidr_file = "configs/firewall-policies/cidrs.yaml"
policy_name = null
@ -101,7 +101,7 @@ module "folder" {
```
```yaml
# tftest file cidrs configs/firewall-policies/cidrs.yaml
# tftest-file id=cidrs path=configs/firewall-policies/cidrs.yaml
rfc1918:
- 10.0.0.0/8
- 172.16.0.0/12
@ -109,7 +109,7 @@ rfc1918:
```
```yaml
# tftest file rules configs/firewall-policies/rules.yaml
# tftest-file id=rules path=configs/firewall-policies/rules.yaml
allow-admins:
description: Access from the admin subnet to all subnets
direction: INGRESS
@ -250,8 +250,8 @@ module "org" {
organization_id = var.organization_id
tags = {
environment = {
description = "Environment specification."
iam = null
description = "Environment specification."
iam = null
values = {
dev = null
prod = null

View File

@ -62,7 +62,7 @@ module "bucket" {
source = "./fabric/modules/gcs"
project_id = "myproject"
prefix = "test"
name = "my-bucket"
name = "my-bucket"
iam = {
"roles/storage.admin" = ["group:storage@example.com"]

View File

@ -22,7 +22,7 @@ module "cluster-1" {
master_authorized_ranges = {
internal-vms = "10.0.0.0/8"
}
master_ipv4_cidr_block = "192.168.0.0/28"
master_ipv4_cidr_block = "192.168.0.0/28"
}
max_pods_per_node = 32
private_cluster_config = {
@ -54,7 +54,7 @@ module "cluster-1" {
master_authorized_ranges = {
internal-vms = "10.0.0.0/8"
}
master_ipv4_cidr_block = "192.168.0.0/28"
master_ipv4_cidr_block = "192.168.0.0/28"
}
private_cluster_config = {
enable_private_endpoint = true

View File

@ -56,7 +56,7 @@ module "cluster_1" {
master_authorized_ranges = {
fc1918_10_8 = "10.0.0.0/8"
}
master_ipv4_cidr_block = "192.168.0.0/28"
master_ipv4_cidr_block = "192.168.0.0/28"
}
enable_features = {
dataplane_v2 = true
@ -115,7 +115,7 @@ module "hub" {
}
}
configmanagement_clusters = {
"default" = [ "cluster-1" ]
"default" = ["cluster-1"]
}
}
@ -216,7 +216,7 @@ module "cluster_1" {
mgmt = "10.0.0.0/28"
pods-cluster-1 = "10.3.0.0/16"
}
master_ipv4_cidr_block = "192.168.1.0/28"
master_ipv4_cidr_block = "192.168.1.0/28"
}
private_cluster_config = {
enable_private_endpoint = false
@ -240,10 +240,10 @@ module "cluster_1_nodepool" {
}
module "cluster_2" {
source = "./fabric/modules/gke-cluster"
project_id = module.project.project_id
name = "cluster-2"
location = "europe-west4"
source = "./fabric/modules/gke-cluster"
project_id = module.project.project_id
name = "cluster-2"
location = "europe-west4"
vpc_config = {
network = module.vpc.self_link
subnetwork = module.vpc.subnet_self_links["europe-west4/subnet-cluster-2"]
@ -251,7 +251,7 @@ module "cluster_2" {
mgmt = "10.0.0.0/28"
pods-cluster-1 = "10.3.0.0/16"
}
master_ipv4_cidr_block = "192.168.2.0/28"
master_ipv4_cidr_block = "192.168.2.0/28"
}
private_cluster_config = {
enable_private_endpoint = false
@ -264,11 +264,11 @@ module "cluster_2" {
}
module "cluster_2_nodepool" {
source = "./fabric/modules/gke-nodepool"
project_id = module.project.project_id
cluster_name = module.cluster_2.name
location = "europe-west4"
name = "nodepool"
source = "./fabric/modules/gke-nodepool"
project_id = module.project.project_id
cluster_name = module.cluster_2.name
location = "europe-west4"
name = "nodepool"
node_count = { initial = 1 }
service_account = { create = true }
tags = ["cluster-2-node"]
@ -277,7 +277,7 @@ module "cluster_2_nodepool" {
module "hub" {
source = "./fabric/modules/gke-hub"
project_id = module.project.project_id
clusters = {
clusters = {
cluster-1 = module.cluster_1.id
cluster-2 = module.cluster_2.id
}

View File

@ -10,11 +10,11 @@ If no specific node configuration is set via variables, the module uses the prov
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
}
# tftest modules=1 resources=1
```
@ -31,11 +31,11 @@ To use the GCE default service account, you can ignore the variable which is equ
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
}
# tftest modules=1 resources=1
```
@ -46,11 +46,11 @@ To use an existing service account, pass in just the `email` attribute.
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
service_account = {
email = "foo-bar@myproject.iam.gserviceaccount.com"
}
@ -64,11 +64,11 @@ To have the module create a service account, set the `create` attribute to `true
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
service_account = {
create = true
# optional

View File

@ -8,12 +8,11 @@ Note that this module does not fully comply with our design principles, as outpu
```hcl
module "myproject-default-service-accounts" {
source = "./fabric/modules/iam-service-account"
project_id = "myproject"
name = "vm-default"
generate_key = true
source = "./fabric/modules/iam-service-account"
project_id = "myproject"
name = "vm-default"
# authoritative roles granted *on* the service accounts to other identities
iam = {
iam = {
"roles/iam.serviceAccountUser" = ["user:foo@example.com"]
}
# non-authoritative roles granted *to* the service accounts on other resources
@ -24,7 +23,7 @@ module "myproject-default-service-accounts" {
]
}
}
# tftest modules=1 resources=5
# tftest modules=1 resources=4 inventory=basic.yaml
```
<!-- TFDOC OPTS files:1 -->
<!-- BEGIN TFDOC -->

View File

@ -14,9 +14,9 @@ In this module **no lifecycle blocks are set on resources to prevent destroy**,
```hcl
module "kms" {
source = "./fabric/modules/kms"
project_id = "my-project"
iam = {
source = "./fabric/modules/kms"
project_id = "my-project"
iam = {
"roles/cloudkms.admin" = ["user:user1@example.com"]
}
keyring = { location = "europe-west1", name = "test" }
@ -63,8 +63,8 @@ module "kms" {
```hcl
module "kms" {
source = "./fabric/modules/kms"
project_id = "my-project"
source = "./fabric/modules/kms"
project_id = "my-project"
key_purpose = {
key-c = {
purpose = "ASYMMETRIC_SIGN"
@ -74,8 +74,8 @@ module "kms" {
}
}
}
keyring = { location = "europe-west1", name = "test" }
keys = { key-a = null, key-b = null, key-c = null }
keyring = { location = "europe-west1", name = "test" }
keys = { key-a = null, key-b = null, key-c = null }
}
# tftest modules=1 resources=4
```

View File

@ -27,12 +27,12 @@ module "addresses" {
project_id = var.project_id
internal_addresses = {
ilb-1 = {
purpose = "SHARED_LOADBALANCER_VIP"
purpose = "SHARED_LOADBALANCER_VIP"
region = var.region
subnetwork = var.subnet.self_link
}
ilb-2 = {
address = "10.0.0.2"
address = "10.0.0.2"
region = var.region
subnetwork = var.subnet.self_link
}
@ -66,11 +66,11 @@ module "addresses" {
project_id = var.project_id
psc_addresses = {
one = {
address = null
address = null
network = var.vpc.self_link
}
two = {
address = "10.0.0.32"
address = "10.0.0.32"
network = var.vpc.self_link
}
}

View File

@ -117,7 +117,7 @@ The module uses a classic Global Load Balancer by default. To use the non-classi
```hcl
module "glb-0" {
source = "./fabric/modules/net-glb"
source = "./fabric/modules/net-glb"
project_id = "myprj"
name = "glb-test-0"
use_classic_version = false
@ -320,8 +320,8 @@ module "glb-0" {
neg_configs = {
neg-0 = {
hybrid = {
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
endpoints = [{
ip_address = "10.0.0.10"
port = 80
@ -355,10 +355,10 @@ module "glb-0" {
neg_configs = {
neg-0 = {
internet = {
use_fqdn = true
use_fqdn = true
endpoints = [{
destination = "www.example.org"
port = 80
port = 80
}]
}
}
@ -373,7 +373,7 @@ The module supports managing PSC NEGs if the non-classic version of the load bal
```hcl
module "glb-0" {
source = "./fabric/modules/net-glb"
source = "./fabric/modules/net-glb"
project_id = "myprj"
name = "glb-test-0"
use_classic_version = false
@ -390,7 +390,7 @@ module "glb-0" {
neg_configs = {
neg-0 = {
psc = {
region = "europe-west8"
region = "europe-west8"
target_service = "europe-west8-cloudkms.googleapis.com"
}
}
@ -465,7 +465,7 @@ module "glb-0" {
pathmap = {
default_service = "default"
path_rules = [{
paths = ["/other", "/other/*"]
paths = ["/other", "/other/*"]
service = "other"
}]
}
@ -554,16 +554,16 @@ module "glb-0" {
neg-gce-0 = {
backends = [{
balancing_mode = "RATE"
backend = "neg-ew8-c"
backend = "neg-ew8-c"
max_rate = { per_endpoint = 10 }
}]
}
neg-hybrid-0 = {
backends = [{
backend = "neg-hello"
backend = "neg-hello"
}]
health_checks = ["neg"]
protocol = "HTTPS"
health_checks = ["neg"]
protocol = "HTTPS"
}
}
group_configs = {
@ -600,7 +600,7 @@ module "glb-0" {
gce = {
network = "projects/myprj-host/global/networks/svpc"
subnetwork = "projects/myprj-host/regions/europe-west8/subnetworks/gce"
zone = "europe-west8-c"
zone = "europe-west8-c"
endpoints = [{
instance = "nginx-ew8-c"
ip_address = "10.24.32.26"
@ -610,8 +610,8 @@ module "glb-0" {
}
neg-hello = {
hybrid = {
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
endpoints = [{
ip_address = "192.168.0.3"
port = 443

View File

@ -176,7 +176,7 @@ module "ilb-l7" {
backend_service_configs = {
default = {
port_name = "http"
backends = [
backends = [
{ group = "default" }
]
}
@ -237,7 +237,7 @@ module "ilb-l7" {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
group = "my-neg"
max_rate = { per_endpoint = 1 }
}]
}
@ -245,11 +245,11 @@ module "ilb-l7" {
neg_configs = {
my-neg = {
gce = {
zone = "europe-west1-b"
zone = "europe-west1-b"
endpoints = [{
instance = "test-1"
ip_address = "10.0.0.10"
port = 80
port = 80
}]
}
}
@ -274,7 +274,7 @@ module "ilb-l7" {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
group = "my-neg"
max_rate = { per_endpoint = 1 }
}]
}
@ -282,10 +282,10 @@ module "ilb-l7" {
neg_configs = {
my-neg = {
hybrid = {
zone = "europe-west1-b"
zone = "europe-west1-b"
endpoints = [{
ip_address = "10.0.0.10"
port = 80
port = 80
}]
}
}
@ -310,7 +310,7 @@ module "ilb-l7" {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
group = "my-neg"
max_rate = { per_endpoint = 1 }
}]
}
@ -367,7 +367,7 @@ module "ilb-l7" {
pathmap = {
default_service = "default"
path_rules = [{
paths = ["/video", "/video/*"]
paths = ["/video", "/video/*"]
service = "video"
}]
}
@ -521,7 +521,7 @@ module "ilb-l7" {
}
neg-home-hello = {
hybrid = {
zone = "europe-west8-b"
zone = "europe-west8-b"
endpoints = [{
ip_address = "192.168.0.3"
port = 443

View File

@ -37,7 +37,7 @@ module "ilb" {
}
}
backends = [{
group = module.ilb.groups.my-group.self_link
group = module.ilb.groups.my-group.self_link
}]
health_check_config = {
http = {
@ -96,7 +96,7 @@ module "ilb" {
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
ports = [80]
backends = [
for z, mod in module.instance-group : {

View File

@ -44,11 +44,11 @@ module "firewall" {
default_rules_config = {
admin_ranges = ["10.0.0.0/8"]
}
egress_rules = {
egress_rules = {
# implicit `deny` action
allow-egress-rfc1918 = {
description = "Allow egress to RFC 1918 ranges."
destination_ranges = [
destination_ranges = [
"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"
]
# implicit { protocol = "all" } rule
@ -108,7 +108,7 @@ module "firewall" {
project_id = "my-project"
network = "my-network"
default_rules_config = {
ssh_ranges = []
ssh_ranges = []
}
}
# tftest modules=1 resources=2
@ -134,34 +134,35 @@ The module includes a rules factory (see [Resource Factories](../../blueprints/f
```hcl
module "firewall" {
source = "./fabric/modules/net-vpc-firewall"
project_id = "my-project"
network = "my-network"
source = "./fabric/modules/net-vpc-firewall"
project_id = "my-project"
network = "my-network"
factories_config = {
rules_folder = "configs/firewal/rules"
cidr_tpl_file = "configs/firewal/cidr_template.yaml"
rules_folder = "configs/firewall/rules"
cidr_tpl_file = "configs/firewall/cidrs.yaml"
}
default_rules_config = { disabled = true }
}
# tftest modules=1 resources=3
# tftest modules=1 resources=1 files=lbs,cidrs
```
```yaml
# tftest file configs/firewall/rules/load_balancers.yaml
allow-healthchecks:
description: Allow ingress from healthchecks.
ranges:
- healthchecks
targets: ["lb-backends"]
rules:
- protocol: tcp
ports:
- 80
- 443
# tftest-file id=lbs path=configs/firewall/rules/load_balancers.yaml
ingress:
allow-healthchecks:
description: Allow ingress from healthchecks.
ranges:
- healthchecks
targets: ["lb-backends"]
rules:
- protocol: tcp
ports:
- 80
- 443
```
```yaml
# tftest file configs/firewall/cidr_template.yaml
# tftest-file id=cidrs path=configs/firewall/cidrs.yaml
healthchecks:
- 35.191.0.0/16
- 130.211.0.0/22

View File

@ -29,12 +29,12 @@ locals {
deny = try(rule.deny, false)
rules = try(rule.rules, [{ protocol = "all" }])
description = try(rule.description, null)
destination_ranges = try(rule.destination_ranges, null)
destination_ranges = try(rule.ranges, null)
direction = upper(direction)
disabled = try(rule.disabled, null)
enable_logging = try(rule.enable_logging, null)
priority = try(rule.priority, 1000)
source_ranges = try(rule.source_ranges, null)
source_ranges = try(rule.ranges, null)
sources = try(rule.sources, null)
targets = try(rule.targets, null)
use_service_accounts = try(rule.use_service_accounts, false)

View File

@ -45,9 +45,9 @@ module "vpc-hub" {
project_id = "hub"
name = "vpc-hub"
subnets = [{
ip_cidr_range = "10.0.0.0/24"
name = "subnet-1"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "subnet-1"
region = "europe-west1"
}]
}
@ -56,9 +56,9 @@ module "vpc-spoke-1" {
project_id = "spoke1"
name = "vpc-spoke1"
subnets = [{
ip_cidr_range = "10.0.1.0/24"
name = "subnet-2"
region = "europe-west1"
ip_cidr_range = "10.0.1.0/24"
name = "subnet-2"
region = "europe-west1"
}]
peering_config = {
peer_vpc_self_link = module.vpc-hub.self_link
@ -75,8 +75,8 @@ module "vpc-spoke-1" {
```hcl
locals {
service_project_1 = {
project_id = "project1"
gke_service_account = "gke"
project_id = "project1"
gke_service_account = "gke"
cloud_services_service_account = "cloudsvc"
}
service_project_2 = {
@ -128,9 +128,9 @@ module "vpc" {
name = "my-network"
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
}
]
psa_config = {
@ -151,13 +151,13 @@ module "vpc" {
name = "my-network"
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
}
]
psa_config = {
ranges = { myrange = "10.0.1.0/24" }
ranges = { myrange = "10.0.1.0/24" }
export_routes = true
import_routes = true
}
@ -205,7 +205,7 @@ module "vpc" {
project_id = "my-project"
name = "my-network"
dns_policy = {
inbound = true
inbound = true
outbound = {
private_ns = ["10.0.0.1"]
public_ns = ["8.8.8.8"]
@ -213,9 +213,9 @@ module "vpc" {
}
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
}
]
}
@ -233,11 +233,11 @@ module "vpc" {
name = "my-network"
data_folder = "config/subnets"
}
# tftest modules=1 resources=1 file=subnets
# tftest modules=1 resources=2 files=subnets
```
```yaml
# tftest file subnets ./config/subnets/subnet-name.yaml
# tftest-file id=subnets path=config/subnets/subnet-name.yaml
region: europe-west1
description: Sample description
ip_cidr_range: 10.0.0.0/24
@ -249,9 +249,10 @@ iam_service_accounts: ["fbz@prj.iam.gserviceaccount.com"]
secondary_ip_ranges: # map of secondary ip ranges
secondary-range-a: 192.168.0.0/24
flow_logs: # enable, set to empty map to use defaults
- aggregation_interval: "INTERVAL_5_SEC"
- flow_sampling: 0.5
- metadata: "INCLUDE_ALL_METADATA"
aggregation_interval: "INTERVAL_5_SEC"
flow_sampling: 0.5
metadata: "INCLUDE_ALL_METADATA"
filter_expression: null
```
<!-- BEGIN TFDOC -->

View File

@ -23,11 +23,11 @@ module "vm" {
module "vpn-dynamic" {
source = "./fabric/modules/net-vpn-dynamic"
project_id = "my-project"
region = "europe-west1"
network = var.vpc.name
name = "gateway-1"
source = "./fabric/modules/net-vpn-dynamic"
project_id = "my-project"
region = "europe-west1"
network = var.vpc.name
name = "gateway-1"
router_config = {
asn = 64514
}

View File

@ -13,7 +13,7 @@ module "vpn-1" {
name = "net1-to-net-2"
peer_gateway = { gcp = module.vpn-2.self_link }
router_config = {
asn = 64514
asn = 64514
custom_advertise = {
all_subnets = true
ip_ranges = {
@ -48,7 +48,7 @@ module "vpn-2" {
network = var.vpc2.self_link
name = "net2-to-net1"
router_config = { asn = 64513 }
peer_gateway = { gcp = module.vpn-1.self_link}
peer_gateway = { gcp = module.vpn-1.self_link }
tunnels = {
remote-0 = {
bgp_peer = {

View File

@ -16,22 +16,14 @@ To manage organization policies, the `orgpolicy.googleapis.com` service should b
module "org" {
source = "./fabric/modules/organization"
organization_id = "organizations/1234567890"
group_iam = {
group_iam = {
"cloud-owners@example.org" = ["roles/owner", "roles/projectCreator"]
}
iam = {
iam = {
"roles/resourcemanager.projectCreator" = ["group:cloud-admins@example.org"]
}
org_policy_custom_constraints = {
"custom.gkeEnableAutoUpgrade" = {
resource_types = ["container.googleapis.com/NodePool"]
method_types = ["CREATE"]
condition = "resource.management.autoUpgrade == true"
action_type = "ALLOW"
display_name = "Enable node auto-upgrade"
description = "All node pools must have node auto-upgrade enabled."
}
iam_additive_members = {
"user:compute@example.org" = ["roles/compute.admin", "roles/container.viewer"]
}
org_policies = {
@ -76,7 +68,7 @@ module "org" {
}
}
}
# tftest modules=1 resources=12
# tftest modules=1 resources=13 inventory=basic.yaml
```
## IAM
@ -104,7 +96,7 @@ To manage organization policy custom constraints, the `orgpolicy.googleapis.com`
module "org" {
source = "./fabric/modules/organization"
organization_id = var.organization_id
org_policy_custom_constraints = {
"custom.gkeEnableAutoUpgrade" = {
resource_types = ["container.googleapis.com/NodePool"]
@ -123,7 +115,7 @@ module "org" {
}
}
}
# tftest modules=1 resources=2
# tftest modules=1 resources=2 inventory=custom-constraints.yaml
```
### Org policy custom constraints factory
@ -134,16 +126,20 @@ The example below deploys a few org policy custom constraints split between two
```hcl
module "org" {
source = "./fabric/modules/organization"
organization_id = var.organization_id
source = "./fabric/modules/organization"
organization_id = var.organization_id
org_policy_custom_constraints_data_path = "configs/custom-constraints"
org_policies = {
"custom.gkeEnableAutoUpgrade" = {
enforce = true
}
}
}
# tftest modules=1 resources=3 files=gke,dataproc
# tftest modules=1 resources=3 files=gke inventory=custom-constraints.yaml
```
```yaml
# tftest file gke configs/custom-constraints/gke.yaml
# tftest-file id=gke path=configs/custom-constraints/gke.yaml
custom.gkeEnableLogging:
resource_types:
- container.googleapis.com/Cluster
@ -164,8 +160,9 @@ custom.gkeEnableAutoUpgrade:
description: All node pools must have node auto-upgrade enabled.
```
```yaml
# tftest file dataproc configs/custom-constraints/dataproc.yaml
# tftest-file id=dataproc path=configs/custom-constraints/dataproc.yaml
custom.dataprocNoMoreThan10Workers:
resource_types:
- dataproc.googleapis.com/Cluster
@ -195,6 +192,17 @@ module "org" {
organization_id = var.organization_id
firewall_policies = {
iap-policy = {
allow-admins = {
description = "Access from the admin subnet to all subnets"
direction = "INGRESS"
action = "allow"
priority = 1000
ranges = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
ports = { all = [] }
target_service_accounts = null
target_resources = null
logging = false
}
allow-iap-ssh = {
description = "Always allow ssh from IAP."
direction = "INGRESS"
@ -214,7 +222,7 @@ module "org" {
iap_policy = "iap-policy"
}
}
# tftest modules=1 resources=3
# tftest modules=1 resources=4 inventory=hfw.yaml
```
### Firewall policy factory
@ -227,18 +235,18 @@ module "org" {
organization_id = var.organization_id
firewall_policy_factory = {
cidr_file = "configs/firewall-policies/cidrs.yaml"
policy_name = null
policy_name = "iap-policy"
rules_file = "configs/firewall-policies/rules.yaml"
}
firewall_policy_association = {
factory-policy = module.org.firewall_policy_id["factory"]
iap_policy = module.org.firewall_policy_id["iap-policy"]
}
}
# tftest modules=1 resources=4 files=cidrs,rules
# tftest modules=1 resources=4 files=cidrs,rules inventory=hfw.yaml
```
```yaml
# tftest file cidrs configs/firewall-policies/cidrs.yaml
# tftest-file id=cidrs path=configs/firewall-policies/cidrs.yaml
rfc1918:
- 10.0.0.0/8
- 172.16.0.0/12
@ -246,7 +254,7 @@ rfc1918:
```
```yaml
# tftest file rules configs/firewall-policies/rules.yaml
# tftest-file id=rules path=configs/firewall-policies/rules.yaml
allow-admins:
description: Access from the admin subnet to all subnets
direction: INGRESS
@ -257,19 +265,19 @@ allow-admins:
ports:
all: []
target_resources: null
enable_logging: false
logging: false
allow-ssh-from-iap:
description: Enable SSH from IAP
allow-iap-ssh:
description: "Always allow ssh from IAP."
direction: INGRESS
action: allow
priority: 1002
priority: 100
ranges:
- 35.235.240.0/20
ports:
tcp: ["22"]
target_resources: null
enable_logging: false
logging: false
```
## Logging Sinks
@ -325,7 +333,7 @@ module "org" {
debug = {
destination = module.bucket.id
filter = "severity=DEBUG"
exclusions = {
exclusions = {
no-compute = "logName:compute"
}
type = "logging"
@ -335,7 +343,7 @@ module "org" {
no-gce-instances = "resource.type=gce_instance"
}
}
# tftest modules=5 resources=13
# tftest modules=5 resources=13 inventory=logging.yaml
```
## Custom Roles
@ -353,7 +361,7 @@ module "org" {
(module.org.custom_role_id.myRole) = ["user:me@example.com"]
}
}
# tftest modules=1 resources=2
# tftest modules=1 resources=2 inventory=roles.yaml
```
## Tags
@ -366,12 +374,12 @@ module "org" {
organization_id = var.organization_id
tags = {
environment = {
description = "Environment specification."
iam = {
description = "Environment specification."
iam = {
"roles/resourcemanager.tagAdmin" = ["group:admins@example.com"]
}
values = {
dev = {}
dev = {}
prod = {
description = "Environment: production."
iam = {
@ -386,7 +394,7 @@ module "org" {
foo = "tagValues/12345678"
}
}
# tftest modules=1 resources=7
# tftest modules=1 resources=7 inventory=tags.yaml
```
You can also define network tags, through a dedicated variable *network_tags*:
@ -397,13 +405,13 @@ module "org" {
organization_id = var.organization_id
network_tags = {
net-environment = {
description = "This is a network tag."
network = "my_project/my_vpc"
iam = {
description = "This is a network tag."
network = "my_project/my_vpc"
iam = {
"roles/resourcemanager.tagAdmin" = ["group:admins@example.com"]
}
values = {
dev = null
dev = null
prod = {
description = "Environment: production."
iam = {
@ -414,7 +422,7 @@ module "org" {
}
}
}
# tftest modules=1 resources=5
# tftest modules=1 resources=5 inventory=network-tags.yaml
```
<!-- TFDOC OPTS files:1 -->

View File

@ -2,6 +2,23 @@
This module implements the creation and management of one GCP project including IAM, organization policies, Shared VPC host or service attachment, service API activation, and tag attachment. It also offers a convenient way to refer to managed service identities (aka robot service accounts) for APIs.
# Basic Project Creation
```hcl
module "project" {
source = "./fabric/modules/project"
billing_account = "123456-123456-123456"
name = "myproject"
parent = "folders/1234567890"
prefix = "foo"
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
}
# tftest modules=1 resources=3 inventory=basic.yaml
```
## IAM Examples
IAM is managed via several variables that implement different levels of control:
@ -26,7 +43,7 @@ module "project" {
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
services = [
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
@ -36,7 +53,7 @@ module "project" {
]
}
}
# tftest modules=1 resources=4
# tftest modules=1 resources=4 inventory=iam-authoritative.yaml
```
The `group_iam` variable uses group email addresses as keys and is a convenient way to assign roles to humans following Google's best practices. The end result is readable code that also serves as documentation.
@ -48,10 +65,6 @@ module "project" {
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
group_iam = {
"gcp-security-admins@example.com" = [
"roles/cloudasset.owner",
@ -61,7 +74,7 @@ module "project" {
]
}
}
# tftest modules=1 resources=7
# tftest modules=1 resources=5 inventory=iam-group.yaml
```
### Additive IAM
@ -70,22 +83,37 @@ Additive IAM is typically used where bindings for specific roles are controlled
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
iam_additive = {
"roles/viewer" = [
"roles/viewer" = [
"group:one@example.org",
"group:two@xample.org"
],
"roles/storage.objectAdmin" = [
"roles/storage.objectAdmin" = [
"group:two@example.org"
],
"roles/owner" = [
"roles/owner" = [
"group:three@example.org"
],
}
}
# tftest modules=1 resources=5
# tftest modules=1 resources=5 inventory=iam-additive.yaml
```
### Additive IAM by members
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
iam_additive_members = {
"user:one@example.org" = ["roles/owner"]
"user:two@example.org" = ["roles/owner", "roles/editor"]
}
}
# tftest modules=1 resources=4 inventory=iam-additive-members.yaml
```
### Service Identities and authoritative IAM
@ -94,15 +122,15 @@ As mentioned above, there are cases where authoritative management of specific I
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
group_iam = {
"foo@example.com" = [
"roles/editor"
]
}
iam = {
"roles/editor" = [
"roles/editor" = [
"serviceAccount:${module.project.service_accounts.cloud_services}"
]
}
@ -110,39 +138,32 @@ module "project" {
# tftest modules=1 resources=2
```
## Shared VPC service
## Shared VPC
The module allows managing Shared VPC status for both hosts and service projects, and includes a simple way of assigning Shared VPC roles to service identities.
### Host project
You can enable Shared VPC Host at the project level and manage project service association independently.
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
module "host-project" {
source = "./fabric/modules/project"
name = "my-host-project"
shared_vpc_host_config = {
enabled = true
}
}
# tftest modules=1 resources=2
```
### Service project
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
module "service-project" {
source = "./fabric/modules/project"
name = "my-service-project"
shared_vpc_service_config = {
attach = true
host_project = "my-host-project"
attach = true
host_project = module.host-project.project_id
service_identity_iam = {
"roles/compute.networkUser" = [
"roles/compute.networkUser" = [
"cloudservices", "container-engine"
]
"roles/vpcaccess.user" = [
"roles/vpcaccess.user" = [
"cloudrun"
]
"roles/container.hostServiceAgentUser" = [
@ -151,7 +172,7 @@ module "project" {
}
}
}
# tftest modules=1 resources=6
# tftest modules=2 resources=8 inventory=shared-vpc.yaml
```
## Organization policies
@ -165,10 +186,6 @@ module "project" {
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
org_policies = {
"compute.disableGuestAttributesAccess" = {
enforce = true
@ -208,7 +225,7 @@ module "project" {
}
}
}
# tftest modules=1 resources=10
# tftest modules=1 resources=8 inventory=org-policies.yaml
```
### Organization policy factory
@ -220,63 +237,54 @@ Note that constraints defined via `org_policies` take precedence over those in `o
The example below deploys a few organization policies split between two YAML files.
```hcl
module "folder" {
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
module "project" {
source = "./fabric/modules/project"
billing_account = "123456-123456-123456"
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
org_policies_data_path = "configs/org-policies/"
}
# tftest modules=1 resources=6 files=boolean,list
# tftest modules=1 resources=8 files=boolean,list inventory=org-policies.yaml
```
```yaml
# tftest file boolean configs/org-policies/boolean.yaml
# tftest-file id=boolean path=configs/org-policies/boolean.yaml
compute.disableGuestAttributesAccess:
enforce: true
constraints/compute.skipDefaultNetworkCreation:
enforce: true
iam.disableServiceAccountKeyCreation:
enforce: true
iam.disableServiceAccountKeyUpload:
enforce: false
rules:
- condition:
expression: resource.matchTagId("tagKeys/1234", "tagValues/1234")
title: condition
description: test condition
location: xxx
enforce: true
- condition:
description: test condition
expression: resource.matchTagId("tagKeys/1234", "tagValues/1234")
location: somewhere
title: condition
enforce: true
```
```yaml
# tftest file list configs/org-policies/list.yaml
compute.vmExternalIpAccess:
deny:
all: true
iam.allowedPolicyMemberDomains:
# tftest-file id=list path=configs/org-policies/list.yaml
constraints/compute.trustedImageProjects:
allow:
values:
- C0xxxxxxx
- C0yyyyyyy
compute.restrictLoadBalancerCreationForTypes:
- projects/my-project
constraints/compute.vmExternalIpAccess:
deny:
values: ["in:EXTERNAL"]
rules:
- condition:
expression: resource.matchTagId("tagKeys/1234", "tagValues/1234")
title: condition
description: test condition
allow:
values: ["in:EXTERNAL"]
- condition:
expression: resource.matchTagId("tagKeys/12345", "tagValues/12345")
title: condition2
description: test condition2
allow:
all: true
all: true
constraints/iam.allowedPolicyMemberDomains:
allow:
values:
- C0xxxxxxx
- C0yyyyyyy
```
## Logging Sinks (in same project)
## Logging Sinks
```hcl
module "gcs" {
@ -339,49 +347,18 @@ module "project-host" {
no-gce-instances = "resource.type=gce_instance"
}
}
# tftest modules=5 resources=14
# tftest modules=5 resources=14 inventory=logging.yaml
```
## Logging Sinks (in different project)
When writing to destinations in a different project, set `unique_writer` to `true`.
```hcl
module "gcs" {
source = "./fabric/modules/gcs"
project_id = "project-1"
name = "gcs_sink"
force_destroy = true
}
module "project-host" {
source = "./fabric/modules/project"
name = "project-2"
billing_account = "123456-123456-123456"
parent = "folders/1234567890"
logging_sinks = {
warnings = {
destination = module.gcs.id
filter = "severity=WARNING"
unique_writer = true
type = "storage"
}
}
}
# tftest modules=2 resources=4
```
## Cloud KMS encryption keys
The module offers a simple, centralized way to assign `roles/cloudkms.cryptoKeyEncrypterDecrypter` to service identities.
```hcl
module "project" {
source = "./fabric/modules/project"
name = "my-project"
billing_account = "123456-123456-123456"
prefix = "foo"
source = "./fabric/modules/project"
name = "my-project"
prefix = "foo"
services = [
"compute.googleapis.com",
"storage.googleapis.com"
@ -409,8 +386,8 @@ module "org" {
organization_id = var.organization_id
tags = {
environment = {
description = "Environment specification."
iam = null
description = "Environment specification."
iam = null
values = {
dev = null
prod = null
@ -438,8 +415,8 @@ One non-obvious output is `service_accounts`, which offers a simple way to disco
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
services = [
"compute.googleapis.com"
]
@ -448,7 +425,7 @@ module "project" {
output "compute_robot" {
value = module.project.service_accounts.robots.compute
}
# tftest modules=1 resources=2
# tftest modules=1 resources=2 inventory=outputs.yaml
```
<!-- TFDOC OPTS files:1 -->

View File

@ -31,7 +31,7 @@ output "folders" {
module "my-dev" {
source = "./fabric/modules/projects-data-source"
parent = "folders/123456789"
filter = "labels.env:DEV lifecycleState:ACTIVE"
filter = "labels.env:DEV lifecycleState:ACTIVE"
}
output "dev-projects" {

View File

@ -28,7 +28,7 @@ module "topic_with_schema" {
name = "my-topic"
schema = {
msg_encoding = "JSON"
schema_type = "AVRO"
schema_type = "AVRO"
definition = jsonencode({
"type" = "record",
"name" = "Avro",

View File

@ -16,7 +16,7 @@ The secret replication policy is automatically managed if no location is set, or
module "secret-manager" {
source = "./fabric/modules/secret-manager"
project_id = "my-project"
secrets = {
secrets = {
test-auto = null
test-manual = ["europe-west1", "europe-west4"]
}
@ -32,12 +32,12 @@ IAM bindings can be set per secret in the same way as for most other modules sup
module "secret-manager" {
source = "./fabric/modules/secret-manager"
project_id = "my-project"
secrets = {
secrets = {
test-auto = null
test-manual = ["europe-west1", "europe-west4"]
}
iam = {
test-auto = {
test-auto = {
"roles/secretmanager.secretAccessor" = ["group:auto-readers@example.com"]
}
test-manual = {
@ -56,7 +56,7 @@ As mentioned above, please be aware that **version data will be stored in state
module "secret-manager" {
source = "./fabric/modules/secret-manager"
project_id = "my-project"
secrets = {
secrets = {
test-auto = null
test-manual = ["europe-west1", "europe-west4"]
}

View File

@ -11,10 +11,10 @@ It can be used in conjunction with the [DNS](../dns) module to create [service-d
```hcl
module "service-directory" {
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
iam = {
"roles/servicedirectory.editor" = [
"serviceAccount:namespace-editor@example.com"
@ -28,10 +28,10 @@ module "service-directory" {
```hcl
module "service-directory" {
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
services = {
one = {
endpoints = ["first", "second"]
@ -59,9 +59,9 @@ Wiring a service directory namespace to a private DNS zone allows querying the n
```hcl
module "service-directory" {
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "apps"
iam = {
"roles/servicedirectory.editor" = [

View File

@ -27,16 +27,16 @@ module "repo" {
name = "my-repo"
triggers = {
foo = {
filename = "ci/workflow-foo.yaml"
included_files = ["**/*tf"]
filename = "ci/workflow-foo.yaml"
included_files = ["**/*tf"]
service_account = null
substitutions = {
BAR = 1
}
template = {
branch_name = "main"
project_id = null
tag_name = null
project_id = null
tag_name = null
}
}
}

View File

@ -120,7 +120,7 @@ module "test" {
to = {
operations = [{
method_selectors = ["*"]
service_name = "storage.googleapis.com"
service_name = "storage.googleapis.com"
}]
resources = ["projects/123456789"]
}

View File

@ -20,4 +20,4 @@ instances = {
}
psc_config = {
europe-west1 = "10.0.0.0/28"
}
}

View File

@ -0,0 +1,17 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
counts:
modules: 9
resources: 59

View File

@ -1,28 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "test" {
source = "../../../../../blueprints/cloud-operations/apigee"
project_create = var.project_create
project_id = var.project_id
organization = var.organization
envgroups = var.envgroups
environments = var.environments
instances = var.instances
path = var.path
datastore_name = var.datastore_name
psc_config = var.psc_config
}

View File

@ -1,123 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
variable "project_create" {
description = "Parameters for the creation of the new project."
type = object({
billing_account_id = string
parent = string
})
default = null
}
variable "vpc_create" {
description = "Boolean flag indicating whether the VPC should be created or not."
type = bool
default = true
}
variable "project_id" {
description = "Project ID."
type = string
nullable = false
}
variable "organization" {
description = "Apigee organization."
type = object({
display_name = optional(string, "Apigee organization created by tf module")
description = optional(string, "Apigee organization created by tf module")
authorized_network = optional(string, "vpc")
runtime_type = optional(string, "CLOUD")
billing_type = optional(string)
database_encryption_key = optional(string)
analytics_region = optional(string, "europe-west1")
})
nullable = false
default = {
}
}
variable "envgroups" {
description = "Environment groups (NAME => [HOSTNAMES])."
type = map(list(string))
nullable = false
}
variable "environments" {
description = "Environments."
type = map(object({
display_name = optional(string)
description = optional(string)
node_config = optional(object({
min_node_count = optional(number)
max_node_count = optional(number)
}))
iam = optional(map(list(string)))
envgroups = list(string)
}))
nullable = false
}
variable "instances" {
description = "Instance."
type = map(object({
display_name = optional(string)
description = optional(string)
region = string
environments = list(string)
psa_ip_cidr_range = string
disk_encryption_key = optional(string)
consumer_accept_list = optional(list(string))
}))
nullable = false
}
variable "path" {
description = "Bucket path."
type = string
default = "/analytics"
nullable = false
}
variable "datastore_name" {
description = "Datastore"
type = string
nullable = false
default = "gcs"
}
variable "psc_config" {
description = "PSC configuration."
type = map(string)
nullable = false
}

View File

@ -12,10 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
module: blueprints/cloud-operations/apigee
def test_blueprint(recursive_e2e_plan_runner):
"Test that all blueprint resources are created."
count_modules, count_resources = recursive_e2e_plan_runner(tf_var_file='test.regular.tfvars')
assert count_modules == 10
assert count_resources == 59
tests:
basic:

View File

@ -11,52 +11,50 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest configuration for testing code examples."""
import collections
import re
from pathlib import Path
import marko
import pytest
FABRIC_ROOT = Path(__file__).parents[2]
BLUEPRINTS_PATH = FABRIC_ROOT / 'blueprints/'
MODULES_PATH = FABRIC_ROOT / 'modules/'
SUBMODULES_PATH = MODULES_PATH / 'cloud-config-container'
FILE_TEST_RE = re.compile(r'# tftest file (\w+) ([\S]+)')
FILE_TEST_RE = re.compile(r'# tftest-file +id=(\w+) +path=([\S]+)')
Example = collections.namedtuple('Example', 'code files')
Example = collections.namedtuple('Example', 'name code module files')
File = collections.namedtuple('File', 'path content')
def pytest_generate_tests(metafunc):
"""Find all README.md files and collect code examples tagged for testing."""
if 'example' in metafunc.fixturenames:
modules = [x for x in MODULES_PATH.iterdir() if x.is_dir()]
modules.extend(x for x in SUBMODULES_PATH.iterdir() if x.is_dir())
modules.extend(x for x in BLUEPRINTS_PATH.glob("*/*") if x.is_dir())
modules.sort()
readmes = FABRIC_ROOT.glob('**/README.md')
examples = []
ids = []
for module in modules:
readme = module / 'README.md'
if not readme.exists():
continue
for readme in readmes:
module = readme.parent
doc = marko.parse(readme.read_text())
index = 0
last_header = None
files = {}
files = collections.defaultdict(dict)
# first pass: collect all tftest tagged files
# first pass: collect all examples tagged with tftest-file
last_header = None
for child in doc.children:
if isinstance(child, marko.block.FencedCode):
code = child.children[0].children
match = FILE_TEST_RE.search(code)
if match:
name, path = match.groups()
files[name] = File(path, code)
files[last_header][name] = File(path, code)
elif isinstance(child, marko.block.Heading):
last_header = child.children[0].children
# second pass: collect all examples tagged with tftest
last_header = None
index = 0
for child in doc.children:
if isinstance(child, marko.block.FencedCode):
index += 1
@ -64,12 +62,12 @@ def pytest_generate_tests(metafunc):
if 'tftest skip' in code:
continue
if child.lang == 'hcl':
examples.append(Example(code, files))
path = module.relative_to(FABRIC_ROOT)
name = f'{path}:{last_header}'
if index > 1:
name += f' {index}'
ids.append(name)
examples.append(Example(name, code, path, files[last_header]))
elif isinstance(child, marko.block.Heading):
last_header = child.children[0].children
index = 0

View File

@ -13,19 +13,24 @@
# limitations under the License.
import re
import subprocess
from pathlib import Path
BASE_PATH = Path(__file__).parent
COUNT_TEST_RE = re.compile(
r'# tftest modules=(\d+) resources=(\d+)(?: files=([\w,]+))?')
COUNT_TEST_RE = re.compile(r'# tftest +modules=(\d+) +resources=(\d+)' +
r'(?: +files=([\w,-.]+))?' +
r'(?: +inventory=([\w\-.]+))?')
def test_example(recursive_e2e_plan_runner, tmp_path, example):
def test_example(plan_validator, tmp_path, example):
if match := COUNT_TEST_RE.search(example.code):
(tmp_path / 'fabric').symlink_to(Path(BASE_PATH, '../../'))
(tmp_path / 'variables.tf').symlink_to(Path(BASE_PATH, 'variables.tf'))
(tmp_path / 'fabric').symlink_to(BASE_PATH.parents[1])
(tmp_path / 'variables.tf').symlink_to(BASE_PATH / 'variables.tf')
(tmp_path / 'main.tf').write_text(example.code)
expected_modules = int(match.group(1))
expected_resources = int(match.group(2))
if match.group(3) is not None:
requested_files = match.group(3).split(',')
for f in requested_files:
@ -33,13 +38,28 @@ def test_example(recursive_e2e_plan_runner, tmp_path, example):
destination.parent.mkdir(parents=True, exist_ok=True)
destination.write_text(example.files[f].content)
expected_modules = int(match.group(1)) if match is not None else 1
expected_resources = int(match.group(2)) if match is not None else 1
inventory = []
if match.group(4) is not None:
python_test_path = str(example.module).replace('-', '_')
inventory = BASE_PATH.parent / python_test_path / 'examples'
inventory = inventory / match.group(4)
num_modules, num_resources = recursive_e2e_plan_runner(
str(tmp_path), tmpdir=False)
# TODO: force plan_validator to never copy files (we're already
# running from a temp dir)
summary = plan_validator(module_path=tmp_path, inventory_paths=inventory,
tf_var_files=[])
counts = summary.counts
num_modules, num_resources = counts['modules'], counts['resources']
assert expected_modules == num_modules, 'wrong number of modules'
assert expected_resources == num_resources, 'wrong number of resources'
# TODO(jccb): this should probably be done in check_documentation
# but we already have all the data here.
result = subprocess.run(
'terraform fmt -check -diff -no-color main.tf'.split(), cwd=tmp_path,
stdout=subprocess.PIPE, encoding='utf-8')
assert result.returncode == 0, f'terraform code not formatted correctly\n{result.stdout}'
else:
assert False, "can't find tftest directive"
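To make the path derivation concrete: for an example collected from `modules/project` and tagged `inventory=shared-vpc.yaml`, `BASE_PATH.parent / python_test_path / 'examples'` resolves to `tests/modules/project/examples/shared-vpc.yaml`. A hypothetical sketch of such an inventory in the `values`/`counts` layout consumed by `plan_validator` (the address and attribute shown are illustrative, not the actual file contents):

```yaml
# hypothetical tests/modules/project/examples/shared-vpc.yaml
values:
  # resource address -> expected attribute values (illustrative)
  module.host-project.google_project.project[0]:
    name: my-host-project
counts:
  modules: 2
  resources: 8
```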

View File

@ -19,7 +19,7 @@ variable "bucket" {
}
variable "billing_account_id" {
default = "billing_account_id"
default = "123456-123456-123456"
}
variable "kms_key" {

View File

@ -0,0 +1,29 @@
automation = {
federated_identity_pool = null
federated_identity_providers = null
project_id = "fast-prod-automation"
project_number = 123456
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
# organization_iam_admin = "organizations/123456789012/roles/organizationIamAdmin",
service_project_network_admin = "organizations/123456789012/roles/xpnServiceAdmin"
}
groups = {
gcp-billing-admins = "gcp-billing-admins",
gcp-devops = "gcp-devops",
gcp-network-admins = "gcp-network-admins",
gcp-organization-admins = "gcp-organization-admins",
gcp-security-admins = "gcp-security-admins",
gcp-support = "gcp-support"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"

View File

@ -1,48 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "stage" {
source = "../../../../../fast/stages/01-resman"
automation = {
federated_identity_pool = null
federated_identity_providers = null
project_id = "fast-prod-automation"
project_number = 123456
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
# organization_iam_admin = "organizations/123456789012/roles/organizationIamAdmin",
service_project_network_admin = "organizations/123456789012/roles/xpnServiceAdmin"
}
groups = {
gcp-billing-admins = "gcp-billing-admins",
gcp-devops = "gcp-devops",
gcp-network-admins = "gcp-network-admins",
gcp-organization-admins = "gcp-organization-admins",
gcp-security-admins = "gcp-security-admins",
gcp-support = "gcp-support"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/01-resman",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,30 @@
data_dir = "../../../fast/stages/02-networking-nva/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
gke-dev = "string"
gke-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"

View File

@ -1,49 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "stage" {
source = "../../../../../fast/stages/02-networking-nva"
data_dir = "../../../../../fast/stages/02-networking-nva/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
gke-dev = "string"
gke-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/02-networking-nva",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,35 @@
data_dir = "../../../fast/stages/02-networking-peering/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
region_trigram = {
europe-west1 = "ew1"
europe-west3 = "ew3"
europe-west8 = "ew8"
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
gke-dev = "string"
gke-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"

View File

@ -1,54 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "stage" {
source = "../../../../../fast/stages/02-networking-peering"
data_dir = "../../../../../fast/stages/02-networking-peering/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
region_trigram = {
europe-west1 = "ew1"
europe-west3 = "ew3"
europe-west8 = "ew8"
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
gke-dev = "string"
gke-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"
}

View File

@ -27,24 +27,27 @@ STAGE_PEERING = STAGES / '02-networking-peering'
STAGE_VPN = STAGES / '02-networking-vpn'
def test_counts(recursive_e2e_plan_runner):
'Test stage.'
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
def test_counts(plan_summary):
"Test stage."
summary = plan_summary("fast/stages/02-networking-peering",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0
def test_vpn_peering_parity(e2e_plan_runner):
def test_vpn_peering_parity(plan_summary):
'''Ensure VPN- and peering-based networking stages are identical except
for VPN and VPC peering resources'''
_, plan_peering = e2e_plan_runner(fixture_path=FIXTURE_PEERING)
_, plan_vpn = e2e_plan_runner(fixture_path=FIXTURE_VPN)
summary_peering = plan_summary("fast/stages/02-networking-peering",
tf_var_files=["common.tfvars"])
summary_vpn = plan_summary("fast/stages/02-networking-vpn",
tf_var_files=["common.tfvars"])
ddiff = DeepDiff(plan_vpn, plan_peering, ignore_order=True,
group_by='address', view='tree')
ddiff = DeepDiff(summary_vpn.values, summary_peering.values,
ignore_order=True)
removed_types = {x.t1['type'] for x in ddiff['dictionary_item_removed']}
added_types = {x.t2['type'] for x in ddiff['dictionary_item_added']}
removed_types = {x.split('.')[-2] for x in ddiff['dictionary_item_removed']}
added_types = {x.split('.')[-2] for x in ddiff['dictionary_item_added']}
assert added_types == {'google_compute_network_peering'}
assert removed_types == {

View File

@ -0,0 +1,28 @@
data_dir = "../../../../../fast/stages/02-networking-separate-envs/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"

View File

@ -1,47 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "stage" {
source = "../../../../../fast/stages/02-networking-separate-envs"
data_dir = "../../../../../fast/stages/02-networking-separate-envs/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/02-networking-separate-envs",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,35 @@
data_dir = "../../../../../fast/stages/02-networking-vpn/data/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
custom_roles = {
service_project_network_admin = "organizations/123456789012/roles/foo"
}
folder_ids = {
networking = null
networking-dev = null
networking-prod = null
}
region_trigram = {
europe-west1 = "ew1"
europe-west3 = "ew3"
europe-west8 = "ew8"
}
service_accounts = {
data-platform-dev = "string"
data-platform-prod = "string"
gke-dev = "string"
gke-prod = "string"
project-factory-dev = "string"
project-factory-prod = "string"
}
organization = {
domain = "fast.example.com"
id = 123456789012
customer_id = "C00000000"
}
prefix = "fast2"

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/02-networking-vpn",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,88 @@
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
folder_ids = {
security = null
}
organization = {
domain = "gcp-pso-italy.net"
id = 856933387836
customer_id = "C01lmug8b"
}
prefix = "fast"
kms_keys = {
compute = {
iam = {
"roles/cloudkms.admin" = ["user:user1@example.com"]
}
labels = { service = "compute" }
locations = null
rotation_period = null
}
}
service_accounts = {
security = "foobar@iam.gserviceaccount.com"
data-platform-dev = "foobar@iam.gserviceaccount.com"
data-platform-prod = "foobar@iam.gserviceaccount.com"
project-factory-dev = "foobar@iam.gserviceaccount.com"
project-factory-prod = "foobar@iam.gserviceaccount.com"
}
vpc_sc_access_levels = {
onprem = {
conditions = [{
ip_subnetworks = ["101.101.101.0/24"]
}]
}
}
vpc_sc_egress_policies = {
iac-gcs = {
from = {
identities = [
"serviceAccount:xxx-prod-resman-security-0@xxx-prod-iac-core-0.iam.gserviceaccount.com"
]
}
to = {
operations = [{
method_selectors = ["*"]
service_name = "storage.googleapis.com"
}]
resources = ["projects/123456782"]
}
}
}
vpc_sc_ingress_policies = {
iac = {
from = {
identities = [
"serviceAccount:xxx-prod-resman-security-0@xxx-prod-iac-core-0.iam.gserviceaccount.com"
]
access_levels = ["*"]
}
to = {
operations = [{ method_selectors = [], service_name = "*" }]
resources = ["*"]
}
}
}
vpc_sc_perimeters = {
dev = {
egress_policies = ["iac-gcs"]
ingress_policies = ["iac"]
resources = ["projects/1111111111"]
}
prod = {
egress_policies = ["iac-gcs"]
ingress_policies = ["iac"]
resources = ["projects/0000000000"]
}
landing = {
access_levels = ["onprem"]
egress_policies = ["iac-gcs"]
ingress_policies = ["iac"]
resources = ["projects/2222222222"]
}
}

View File

@ -1,107 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "stage" {
source = "../../../../../fast/stages/02-security"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
folder_ids = {
security = null
}
organization = {
domain = "gcp-pso-italy.net"
id = 856933387836
customer_id = "C01lmug8b"
}
prefix = "fast"
kms_keys = {
compute = {
iam = {
"roles/cloudkms.admin" = ["user:user1@example.com"]
}
labels = { service = "compute" }
locations = null
rotation_period = null
}
}
service_accounts = {
security = "foobar@iam.gserviceaccount.com"
data-platform-dev = "foobar@iam.gserviceaccount.com"
data-platform-prod = "foobar@iam.gserviceaccount.com"
project-factory-dev = "foobar@iam.gserviceaccount.com"
project-factory-prod = "foobar@iam.gserviceaccount.com"
}
vpc_sc_access_levels = {
onprem = {
conditions = [{
ip_subnetworks = ["101.101.101.0/24"]
}]
}
}
vpc_sc_egress_policies = {
iac-gcs = {
from = {
identities = [
"serviceAccount:xxx-prod-resman-security-0@xxx-prod-iac-core-0.iam.gserviceaccount.com"
]
}
to = {
operations = [{
method_selectors = ["*"]
service_name = "storage.googleapis.com"
}]
resources = ["projects/123456782"]
}
}
}
vpc_sc_ingress_policies = {
iac = {
from = {
identities = [
"serviceAccount:xxx-prod-resman-security-0@xxx-prod-iac-core-0.iam.gserviceaccount.com"
]
access_levels = ["*"]
}
to = {
operations = [{ method_selectors = [], service_name = "*" }]
resources = ["*"]
}
}
}
vpc_sc_perimeters = {
dev = {
egress_policies = ["iac-gcs"]
ingress_policies = ["iac"]
resources = ["projects/1111111111"]
}
dev = {
egress_policies = ["iac-gcs"]
ingress_policies = ["iac"]
resources = ["projects/0000000000"]
}
dev = {
access_levels = ["onprem"]
egress_policies = ["iac-gcs"]
ingress_policies = ["iac"]
resources = ["projects/2222222222"]
}
}
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/02-security",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,26 @@
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "012345-67890A-BCDEF0",
organization_id = 123456
}
folder_ids = {
data-platform-dev = "folders/12345678"
}
host_project_ids = {
dev-spoke-0 = "fast-dev-net-spoke-0"
}
organization = {
domain = "example.com"
id = 123456789012
customer_id = "A11aaaaa1"
}
prefix = "fast"
subnet_self_links = {
dev-spoke-0 = {
"europe-west1/dev-dataplatform-ew1" : "https://www.googleapis.com/compute/v1/projects/fast-dev-net-spoke-0/regions/europe-west1/subnetworks/dev-dataplatform-ew1",
"europe-west1/dev-default-ew1" : "https://www.googleapis.com/compute/v1/projects/fast-dev-net-spoke-0/regions/europe-west1/subnetworks/dev-default-ew1"
}
}
vpc_self_links = { dev-spoke-0 = "https://www.googleapis.com/compute/v1/projects/fast-dev-net-spoke-0/global/networks/dev-spoke-0" }

View File

@ -1,47 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# tfdoc: Data platform stage test
module "stage" {
source = "../../../../../fast/stages/03-data-platform/dev/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "012345-67890A-BCDEF0",
organization_id = 123456
}
folder_ids = {
data-platform-dev = "folders/12345678"
}
host_project_ids = {
dev-spoke-0 = "fast-dev-net-spoke-0"
}
organization = {
domain = "example.com"
id = 123456789012
customer_id = "A11aaaaa1"
}
prefix = "fast"
subnet_self_links = {
dev-spoke-0 = {
"europe-west1/dev-dataplatform-ew1" : "https://www.googleapis.com/compute/v1/projects/fast-dev-net-spoke-0/regions/europe-west1/subnetworks/dev-dataplatform-ew1",
"europe-west1/dev-default-ew1" : "https://www.googleapis.com/compute/v1/projects/fast-dev-net-spoke-0/regions/europe-west1/subnetworks/dev-default-ew1"
}
}
vpc_self_links = { dev-spoke-0 = "https://www.googleapis.com/compute/v1/projects/fast-dev-net-spoke-0/global/networks/dev-spoke-0" }
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/03-data-platform/dev/",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,41 @@
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "012345-67890A-BCDEF0",
organization_id = 123456
}
clusters = {
mycluster = {
cluster_autoscaling = null
description = "my cluster"
dns_domain = null
location = "europe-west1"
labels = {}
private_cluster_config = {
enable_private_endpoint = true
master_global_access = true
}
vpc_config = {
subnetwork = "projects/prj-host/regions/europe-west1/subnetworks/gke-0"
master_ipv4_cidr_block = "172.16.20.0/28"
}
}
}
nodepools = {
mycluster = {
mynodepool = {
node_count = { initial = 1 }
}
}
}
folder_ids = {
gke-dev = "folders/12345678"
}
host_project_ids = {
dev-spoke-0 = "fast-dev-net-spoke-0"
}
prefix = "fast"
vpc_self_links = {
dev-spoke-0 = "projects/fast-dev-net-spoke-0/global/networks/dev-spoke-0"
}

View File

@ -1,62 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
# tfdoc: Data platform stage test
module "stage" {
source = "../../../../../fast/stages/03-gke-multitenant/dev/"
automation = {
outputs_bucket = "test"
}
billing_account = {
id = "012345-67890A-BCDEF0",
organization_id = 123456
}
clusters = {
mycluster = {
cluster_autoscaling = null
description = "my cluster"
dns_domain = null
location = "europe-west1"
labels = {}
private_cluster_config = {
enable_private_endpoint = true
master_global_access = true
}
vpc_config = {
subnetwork = "projects/prj-host/regions/europe-west1/subnetworks/gke-0"
master_ipv4_cidr_block = "172.16.20.0/28"
}
}
}
nodepools = {
mycluster = {
mynodepool = {
node_count = { initial = 1 }
}
}
}
folder_ids = {
gke-dev = "folders/12345678"
}
host_project_ids = {
dev-spoke-0 = "fast-dev-net-spoke-0"
}
prefix = "fast"
vpc_self_links = {
dev-spoke-0 = "projects/fast-dev-net-spoke-0/global/networks/dev-spoke-0"
}
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/03-gke-multitenant/dev/",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -0,0 +1,11 @@
data_dir = "../../../../tests/fast/stages/s03_project_factory/data/projects/"
defaults_file = "../../../../tests/fast/stages/s03_project_factory/data/defaults.yaml"
prefix = "test"
environment_dns_zone = "dev"
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
vpc_self_links = {
dev-spoke-0 = "link"
}

View File

@ -1,32 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "projects" {
source = "../../../../../fast/stages/03-project-factory/dev"
data_dir = "./data/projects/"
defaults_file = "./data/defaults.yaml"
prefix = "test"
environment_dns_zone = "dev"
billing_account = {
id = "000000-111111-222222"
organization_id = 123456789012
}
vpc_self_links = {
dev-spoke-0 = "link"
}
}

View File

@ -13,8 +13,9 @@
# limitations under the License.
def test_counts(recursive_e2e_plan_runner):
def test_counts(plan_summary):
"Test stage."
num_modules, num_resources = recursive_e2e_plan_runner()
# TODO: to re-enable per-module resource count check print _, then test
assert num_modules > 0 and num_resources > 0
summary = plan_summary("fast/stages/03-project-factory/dev",
tf_var_files=["common.tfvars"])
assert summary.counts["modules"] > 0
assert summary.counts["resources"] > 0

View File

@ -103,6 +103,7 @@ def plan_summary(module_path, basedir, tf_var_files=None, **tf_vars):
# compute resource type counts and address->values map
values = {}
counts = collections.defaultdict(int)
counts['modules'] = counts['resources'] = 0
q = collections.deque([plan.root_module])
while q:
e = q.popleft()
@ -113,8 +114,10 @@ def plan_summary(module_path, basedir, tf_var_files=None, **tf_vars):
values[e['address']] = e['values']
for x in e.get('resources', []):
counts['resources'] += 1
q.append(x)
for x in e.get('child_modules', []):
counts['modules'] += 1
q.append(x)
# extract planned outputs
@ -224,7 +227,7 @@ def plan_validator_fixture(request):
basedir = Path(request.fspath).parent
return plan_validator(module_path=module_path,
inventory_paths=inventory_paths, basedir=basedir,
tf_var_files=tf_var_paths, **tf_vars)
tf_var_files=tf_var_files, **tf_vars)
return inner

View File

@ -96,34 +96,6 @@ def e2e_plan_runner(_plan_runner):
return run_plan
@pytest.fixture(scope='session')
def recursive_e2e_plan_runner(_plan_runner):
"""
Plan runner for end-to-end root module, returns total number of
(nested) modules and resources
"""
def walk_plan(node, modules, resources):
new_modules = node.get('child_modules', [])
resources += node.get('resources', [])
modules += new_modules
for module in new_modules:
walk_plan(module, modules, resources)
def run_plan(fixture_path=None, tf_var_file=None, targets=None, refresh=True,
include_bare_resources=False, compute_sums=True, tmpdir=True,
**tf_vars):
'Run Terraform plan on a root module using defaults, returns data.'
plan = _plan_runner(fixture_path, tf_var_file=tf_var_file, targets=targets,
refresh=refresh, tmpdir=tmpdir, **tf_vars)
modules = []
resources = []
walk_plan(plan.root_module, modules, resources)
return len(modules), len(resources)
return run_plan
@pytest.fixture(scope='session')
def apply_runner():
'Return a function to run Terraform apply on a fixture.'

View File

@ -0,0 +1,39 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
values:
module.myproject-default-service-accounts.google_project_iam_member.project-roles["myproject-roles/logging.logWriter"]:
condition: []
project: myproject
role: roles/logging.logWriter
module.myproject-default-service-accounts.google_project_iam_member.project-roles["myproject-roles/monitoring.metricWriter"]:
condition: []
project: myproject
role: roles/monitoring.metricWriter
module.myproject-default-service-accounts.google_service_account.service_account[0]:
account_id: vm-default
description: null
disabled: false
display_name: Terraform-managed.
project: myproject
timeouts: null
module.myproject-default-service-accounts.google_service_account_iam_binding.roles["roles/iam.serviceAccountUser"]:
condition: []
members:
- user:foo@example.com
role: roles/iam.serviceAccountUser
counts:
google_project_iam_member: 2
google_service_account: 1
google_service_account_iam_binding: 1

View File

@ -1,29 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module "test" {
source = "../../../../modules/iam-service-account"
project_id = var.project_id
name = "sa-one"
prefix = var.prefix
generate_key = var.generate_key
iam = var.iam
iam_billing_roles = var.iam_billing_roles
iam_folder_roles = var.iam_folder_roles
iam_organization_roles = var.iam_organization_roles
iam_project_roles = var.iam_project_roles
iam_storage_roles = var.iam_storage_roles
}

View File

@ -1,60 +0,0 @@
/**
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
variable "generate_key" {
type = bool
default = false
}
variable "iam" {
type = map(list(string))
default = {}
}
variable "iam_billing_roles" {
type = map(list(string))
default = {}
}
variable "iam_folder_roles" {
type = map(list(string))
default = {}
}
variable "iam_organization_roles" {
type = map(list(string))
default = {}
}
variable "iam_project_roles" {
type = map(list(string))
default = {}
}
variable "iam_storage_roles" {
type = map(list(string))
default = {}
}
variable "prefix" {
type = string
default = null
}
variable "project_id" {
type = string
default = "my-project"
}

View File

@ -1,42 +0,0 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_resources(plan_runner):
"Test service account resource."
_, resources = plan_runner()
assert len(resources) == 1
resource = resources[0]
assert resource['type'] == 'google_service_account'
assert resource['values']['account_id'] == 'sa-one'
_, resources = plan_runner(prefix='foo')
assert len(resources) == 1
resource = resources[0]
assert resource['values']['account_id'] == 'foo-sa-one'
def test_iam_roles(plan_runner):
"Test iam roles with one member."
iam = ('{"roles/iam.serviceAccountUser" = ["user:a@b.com"]}')
_, resources = plan_runner(iam=iam)
assert len(resources) == 2
iam_resources = [r for r in resources
if r['type'] != 'google_service_account']
assert len(iam_resources) == 1
iam_resource = iam_resources[0]
assert iam_resource['type'] == 'google_service_account_iam_binding'
assert iam_resource['index'] == 'roles/iam.serviceAccountUser'
assert iam_resource['values']['role'] == 'roles/iam.serviceAccountUser'
assert iam_resource['values']['members'] == ["user:a@b.com"]

View File

@ -15,7 +15,7 @@
ingress:
allow-healthchecks:
description: Allow ingress from healthchecks.
source_ranges:
ranges:
- healthchecks
targets: ["lb-backends"]
rules:

View File

@ -0,0 +1,146 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
values:
module.org.google_org_policy_policy.default["compute.disableGuestAttributesAccess"]:
name: organizations/1234567890/policies/compute.disableGuestAttributesAccess
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: null
enforce: 'TRUE'
values: []
module.org.google_org_policy_policy.default["constraints/compute.skipDefaultNetworkCreation"]:
name: organizations/1234567890/policies/constraints/compute.skipDefaultNetworkCreation
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: null
enforce: 'TRUE'
values: []
module.org.google_org_policy_policy.default["constraints/compute.trustedImageProjects"]:
name: organizations/1234567890/policies/constraints/compute.trustedImageProjects
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: null
enforce: null
values:
- allowed_values:
- projects/my-project
denied_values: null
module.org.google_org_policy_policy.default["constraints/compute.vmExternalIpAccess"]:
name: organizations/1234567890/policies/constraints/compute.vmExternalIpAccess
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: 'TRUE'
enforce: null
values: []
module.org.google_org_policy_policy.default["constraints/iam.allowedPolicyMemberDomains"]:
name: organizations/1234567890/policies/constraints/iam.allowedPolicyMemberDomains
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: null
enforce: null
values:
- allowed_values:
- C0xxxxxxx
- C0yyyyyyy
denied_values: null
module.org.google_org_policy_policy.default["iam.disableServiceAccountKeyCreation"]:
name: organizations/1234567890/policies/iam.disableServiceAccountKeyCreation
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: null
enforce: 'TRUE'
values: []
module.org.google_org_policy_policy.default["iam.disableServiceAccountKeyUpload"]:
name: organizations/1234567890/policies/iam.disableServiceAccountKeyUpload
parent: organizations/1234567890
spec:
- inherit_from_parent: null
reset: null
rules:
- allow_all: null
condition: []
deny_all: null
enforce: 'FALSE'
values: []
- allow_all: null
condition:
- description: test condition
expression: resource.matchTagId("tagKeys/1234", "tagValues/1234")
location: somewhere
title: condition
deny_all: null
enforce: 'TRUE'
values: []
module.org.google_organization_iam_binding.authoritative["roles/owner"]:
condition: []
members:
- group:cloud-owners@example.org
org_id: '1234567890'
role: roles/owner
module.org.google_organization_iam_binding.authoritative["roles/projectCreator"]:
condition: []
members:
- group:cloud-owners@example.org
org_id: '1234567890'
role: roles/projectCreator
module.org.google_organization_iam_binding.authoritative["roles/resourcemanager.projectCreator"]:
condition: []
members:
- group:cloud-admins@example.org
org_id: '1234567890'
role: roles/resourcemanager.projectCreator
module.org.google_organization_iam_member.additive["roles/compute.admin-user:compute@example.org"]:
condition: []
member: user:compute@example.org
org_id: '1234567890'
role: roles/compute.admin
module.org.google_organization_iam_member.additive["roles/container.viewer-user:compute@example.org"]:
condition: []
member: user:compute@example.org
org_id: '1234567890'
role: roles/container.viewer
counts:
google_org_policy_policy: 8
google_organization_iam_binding: 3

Some files were not shown because too many files have changed in this diff.