Enforce terraform fmt in examples

This commit is contained in:
Julio Castillo 2022-12-16 12:53:56 +01:00
parent 0faf8ae1f1
commit e700a27079
45 changed files with 368 additions and 360 deletions

View File

@ -49,8 +49,8 @@ locals {
trimsuffix(f, ".yaml") => yamldecode(file("${local._data_dir}/${f}"))
}
# these are usually set via variables
_base_dir = "./fabric/blueprints/factories/project-factory"
_data_dir = "${local._base_dir}/sample-data/projects/"
_base_dir = "./fabric/blueprints/factories/project-factory"
_data_dir = "${local._base_dir}/sample-data/projects/"
_defaults_file = "${local._base_dir}/sample-data/defaults.yaml"
}

View File

@ -78,7 +78,7 @@ module "gke-fleet" {
location = "europe-west1"
private_cluster_config = local.cluster_defaults.private_cluster_config
vpc_config = {
subnetwork = local.subnet_self_links.ew1
subnetwork = local.subnet_self_links.ew1
master_ipv4_cidr_block = "172.16.10.0/28"
}
}
@ -86,7 +86,7 @@ module "gke-fleet" {
location = "europe-west3"
private_cluster_config = local.cluster_defaults.private_cluster_config
vpc_config = {
subnetwork = local.subnet_self_links.ew3
subnetwork = local.subnet_self_links.ew3
master_ipv4_cidr_block = "172.16.20.0/28"
}
}
@ -95,16 +95,16 @@ module "gke-fleet" {
cluster-0 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
spot = true
spot = true
}
}
}
cluster-1 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
}
}
@ -143,13 +143,13 @@ module "gke" {
prefix = "myprefix"
clusters = {
cluster-0 = {
location = "europe-west1"
location = "europe-west1"
vpc_config = {
subnetwork = local.subnet_self_links.ew1
}
}
cluster-1 = {
location = "europe-west3"
location = "europe-west3"
vpc_config = {
subnetwork = local.subnet_self_links.ew3
}
@ -159,16 +159,16 @@ module "gke" {
cluster-0 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
spot = true
spot = true
}
}
}
cluster-1 = {
nodepool-0 = {
node_config = {
disk_type = "pd-balanced"
disk_type = "pd-balanced"
machine_type = "n2-standard-4"
}
}
@ -205,14 +205,14 @@ module "gke" {
enable_hierarchical_resource_quota = true
enable_pod_tree_labels = true
}
policy_controller = {
policy_controller = {
audit_interval_seconds = 30
exemptable_namespaces = ["kube-system"]
log_denies_enabled = true
referential_rules_enabled = true
template_library_installed = true
}
version = "1.10.2"
version = "1.10.2"
}
}
fleet_configmanagement_clusters = {

View File

@ -6,11 +6,11 @@ This module allows creating an API with its associated API config and API gatewa
## Basic example
```hcl
module "gateway" {
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
# The OpenAPI spec contents
# ...
EOT
@ -31,7 +31,7 @@ module "gateway" {
EOT
service_account_email = "sa@my-project.iam.gserviceaccount.com"
iam = {
"roles/apigateway.admin" = [ "user:user@example.com" ]
"roles/apigateway.admin" = ["user:user@example.com"]
}
}
# tftest modules=1 resources=7
@ -40,18 +40,18 @@ module "gateway" {
## Basic example + service account creation
```hcl
module "gateway" {
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
source = "./fabric/modules/api-gateway"
project_id = "my-project"
api_id = "api"
region = "europe-west1"
spec = <<EOT
# The OpenAPI spec contents
# ...
EOT
service_account_create = true
iam = {
"roles/apigateway.admin" = [ "user:mirene@google.com" ]
"roles/apigateway.viewer" = [ "user:mirene@google.com" ]
"roles/apigateway.admin" = ["user:mirene@google.com"]
"roles/apigateway.viewer" = ["user:mirene@google.com"]
}
}
# tftest modules=1 resources=11

View File

@ -25,14 +25,14 @@ module "apigee" {
}
environments = {
apis-test = {
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
}
apis-prod = {
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
iam = {
"roles/viewer" = ["group:devops@myorg.com"]
}
@ -71,10 +71,10 @@ module "apigee" {
source = "./fabric/modules/apigee"
project_id = "my-project"
organization = {
display_name = "My Organization"
description = "My Organization"
runtime_type = "HYBRID"
analytics_region = "europe-west1"
display_name = "My Organization"
description = "My Organization"
runtime_type = "HYBRID"
analytics_region = "europe-west1"
}
envgroups = {
test = ["test.example.com"]
@ -82,14 +82,14 @@ module "apigee" {
}
environments = {
apis-test = {
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
}
apis-prod = {
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
display_name = "APIs prod"
description = "APIs prod"
envgroups = ["prod"]
iam = {
"roles/viewer" = ["group:devops@myorg.com"]
}
@ -120,9 +120,9 @@ module "apigee" {
project_id = "my-project"
environments = {
apis-test = {
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
display_name = "APIs test"
description = "APIs Test"
envgroups = ["test"]
}
}
}

View File

@ -21,7 +21,7 @@ The access variables are split into `access` and `access_identities` variables,
module "bigquery-dataset" {
source = "./fabric/modules/bigquery-dataset"
project_id = "my-project"
id = "my-dataset"
id = "my-dataset"
access = {
reader-group = { role = "READER", type = "group" }
owner = { role = "OWNER", type = "user" }
@ -46,7 +46,7 @@ Access configuration can also be specified via IAM instead of basic roles via th
module "bigquery-dataset" {
source = "./fabric/modules/bigquery-dataset"
project_id = "my-project"
id = "my-dataset"
id = "my-dataset"
iam = {
"roles/bigquery.dataOwner" = ["user:user1@example.org"]
}

View File

@ -16,19 +16,19 @@ This module allows managing a single BigTable instance, including access configu
```hcl
module "bigtable-instance" {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-west1-b"
tables = {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-west1-b"
tables = {
test1 = null,
test2 = {
split_keys = ["a", "b", "c"]
column_family = null
}
}
iam = {
iam = {
"roles/bigtable.user" = ["user:viewer@testdomain.com"]
}
}
@ -59,11 +59,11 @@ If you use autoscaling, you should not set the variable `num_nodes`.
```hcl
module "bigtable-instance" {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-b"
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-b"
autoscaling_config = {
min_nodes = 3
max_nodes = 7
@ -78,12 +78,12 @@ module "bigtable-instance" {
```hcl
module "bigtable-instance" {
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-a"
storage_type = "SSD"
source = "./fabric/modules/bigtable-instance"
project_id = "my-project"
name = "instance"
cluster_id = "instance"
zone = "europe-southwest1-a"
storage_type = "SSD"
autoscaling_config = {
min_nodes = 3
max_nodes = 7

View File

@ -29,7 +29,7 @@ module "budget" {
]
email_recipients = {
project_id = "my-project"
emails = ["user@example.com"]
emails = ["user@example.com"]
}
}
# tftest modules=1 resources=2

View File

@ -8,8 +8,8 @@ This module simplifies the creation of a Binary Authorization policy, attestors
```hcl
module "binauthz" {
source = "./fabric/modules/binauthz"
project_id = "my_project"
source = "./fabric/modules/binauthz"
project_id = "my_project"
global_policy_evaluation_mode = "DISABLE"
default_admission_rule = {
evaluation_mode = "ALWAYS_DENY"
@ -18,16 +18,16 @@ module "binauthz" {
}
cluster_admission_rules = {
"europe-west1-c.cluster" = {
evaluation_mode = "REQUIRE_ATTESTATION"
enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG"
attestors = [ "test" ]
evaluation_mode = "REQUIRE_ATTESTATION"
enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG"
attestors = ["test"]
}
}
attestors_config = {
"test": {
note_reference = null
pgp_public_keys = [
<<EOT
"test" : {
note_reference = null
pgp_public_keys = [
<<EOT
mQENBFtP0doBCADF+joTiXWKVuP8kJt3fgpBSjT9h8ezMfKA4aXZctYLx5wslWQl
bB7Iu2ezkECNzoEeU7WxUe8a61pMCh9cisS9H5mB2K2uM4Jnf8tgFeXn3akJDVo0
oR1IC+Dp9mXbRSK3MAvKkOwWlG99sx3uEdvmeBRHBOO+grchLx24EThXFOyP9Fk6
@ -44,11 +44,11 @@ module "binauthz" {
qoIRW6y0+UlAc+MbqfL0ziHDOAmcqz1GnROg
=6Bvm
EOT
]
pkix_public_keys = null
iam = {
"roles/viewer" = ["user:user1@my_org.com"]
}
]
pkix_public_keys = null
iam = {
"roles/viewer" = ["user:user1@my_org.com"]
}
}
}
}

View File

@ -24,7 +24,7 @@ This example will create a `cloud-config` that uses the module's defaults, creat
```hcl
module "cos-coredns" {
source = "./fabric/modules/cloud-config-container/coredns"
source = "./fabric/modules/cloud-config-container/coredns"
}
module "vm" {
@ -56,7 +56,7 @@ This example will create a `cloud-config` using a custom CoreDNS configuration,
```hcl
module "cos-coredns" {
source = "./fabric/modules/cloud-config-container/coredns"
source = "./fabric/modules/cloud-config-container/coredns"
coredns_config = "./fabric/modules/cloud-config-container/coredns/Corefile-hosts"
files = {
"/etc/coredns/example.hosts" = {
@ -64,7 +64,7 @@ module "cos-coredns" {
owner = null
permissions = "0644"
}
}
}
}
# tftest modules=0 resources=0
```

View File

@ -12,7 +12,7 @@ This example will create a `cloud-config` that starts [Envoy Proxy](https://www.
```hcl
module "cos-envoy" {
source = "./fabric/modules/cloud-config-container/cos-generic-metadata"
source = "./fabric/modules/cloud-config-container/cos-generic-metadata"
container_image = "envoyproxy/envoy:v1.14.1"
container_name = "envoy"
container_args = "-c /etc/envoy/envoy.yaml --log-level info --allow-unknown-static-fields"

View File

@ -62,7 +62,7 @@ module "cos-mysql" {
source = "./fabric/modules/cloud-config-container/mysql"
mysql_config = "./my.cnf"
mysql_password = "CiQAsd7WY=="
kms_config = {
kms_config = {
project_id = "my-project"
keyring = "test-cos"
location = "europe-west1"

View File

@ -24,7 +24,7 @@ This example will create a `cloud-config` that uses the module's defaults, creat
```hcl
module "cos-nginx" {
source = "./fabric/modules/cloud-config-container/nginx"
source = "./fabric/modules/cloud-config-container/nginx"
}
module "vm-nginx-tls" {

View File

@ -24,9 +24,9 @@ This example will create a `cloud-config` that allows any client in the 10.0.0.0
```hcl
module "cos-squid" {
source = "./fabric/modules/cloud-config-container/squid"
allow = [".github.com"]
clients = ["10.0.0.0/8"]
source = "./fabric/modules/cloud-config-container/squid"
allow = [".github.com"]
clients = ["10.0.0.0/8"]
}
module "vm" {

View File

@ -16,10 +16,10 @@ This deploys a Cloud Function with an HTTP endpoint, using a pre-existing GCS bu
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -31,11 +31,11 @@ module "cf-http" {
Analogous example using 2nd generation Cloud Functions
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
v2 = true
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
v2 = true
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -111,15 +111,15 @@ To allow anonymous access to the function, grant the `roles/cloudfunctions.invok
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
}
iam = {
iam = {
"roles/cloudfunctions.invoker" = ["allUsers"]
}
}
@ -132,15 +132,15 @@ You can have the module auto-create the GCS bucket used for deployment via the `
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bucket_config = {
lifecycle_delete_age_days = 1
}
bundle_config = {
source_dir = "fabric/assets/"
source_dir = "fabric/assets/"
}
}
# tftest modules=1 resources=3
@ -152,10 +152,10 @@ To use a custom service account managed by the module, set `service_account_crea
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -169,10 +169,10 @@ To use an externally managed service account, pass its email in `service_account
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets/"
output_path = "bundle.zip"
@ -188,10 +188,10 @@ In order to help prevent `archive_zip.output_md5` from changing cross platform (
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
bundle_config = {
source_dir = "fabric/assets"
output_path = "bundle.zip"
@ -207,10 +207,10 @@ This deploys a Cloud Function with an HTTP endpoint, using a pre-existing GCS bu
```hcl
module "cf-http" {
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
source = "./fabric/modules/cloud-function"
project_id = "my-project"
name = "test-cf-http"
bucket_name = "test-cf-bundles"
build_worker_pool = "projects/my-project/locations/europe-west1/workerPools/my_build_worker_pool"
bundle_config = {
source_dir = "fabric/assets"

View File

@ -46,7 +46,7 @@ module "group" {
]
managers = [
"user3@example.com"
]
]
}
# tftest modules=1 resources=5
```

View File

@ -14,18 +14,18 @@ module "cloud_run" {
project_id = "my-project"
name = "hello"
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = {
command = null
args = null
env = {
"VAR1": "VALUE1",
"VAR2": "VALUE2",
env = {
"VAR1" : "VALUE1",
"VAR2" : "VALUE2",
}
env_from = null
}
ports = null
resources = null
ports = null
resources = null
volume_mounts = null
}]
}
@ -42,18 +42,18 @@ module "cloud_run" {
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = {
command = null
args = null
env = null
env_from = {
"CREDENTIALS": {
command = null
args = null
env = null
env_from = {
"CREDENTIALS" : {
name = "credentials"
key = "1"
key = "1"
}
}
}
ports = null
resources = null
ports = null
resources = null
volume_mounts = null
}]
}
@ -64,26 +64,26 @@ module "cloud_run" {
```hcl
module "cloud_run" {
source = "./fabric/modules/cloud-run"
project_id = var.project_id
name = "hello"
region = var.region
source = "./fabric/modules/cloud-run"
project_id = var.project_id
name = "hello"
region = var.region
revision_name = "green"
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = null
ports = null
resources = null
image = "us-docker.pkg.dev/cloudrun/container/hello"
options = null
ports = null
resources = null
volume_mounts = {
"credentials": "/credentials"
"credentials" : "/credentials"
}
}]
volumes = [
{
name = "credentials"
name = "credentials"
secret_name = "credentials"
items = [{
key = "1"
key = "1"
path = "v1.txt"
}]
}
@ -98,9 +98,9 @@ This deploys a Cloud Run service with traffic split between two revisions.
```hcl
module "cloud_run" {
source = "./fabric/modules/cloud-run"
project_id = "my-project"
name = "hello"
source = "./fabric/modules/cloud-run"
project_id = "my-project"
name = "hello"
revision_name = "green"
containers = [{
image = "us-docker.pkg.dev/cloudrun/container/hello"
@ -110,7 +110,7 @@ module "cloud_run" {
volume_mounts = null
}]
traffic = {
"blue" = 25
"blue" = 25
"green" = 75
}
}
@ -159,8 +159,8 @@ module "cloud_run" {
}]
audit_log_triggers = [
{
service_name = "cloudresourcemanager.googleapis.com"
method_name = "SetIamPolicy"
service_name = "cloudresourcemanager.googleapis.com"
method_name = "SetIamPolicy"
}
]
}

View File

@ -88,7 +88,7 @@ module "db" {
# generate a password for user1
user1 = null
# assign a password to user2
user2 = "mypassword"
user2 = "mypassword"
}
}
# tftest modules=1 resources=6

View File

@ -243,9 +243,9 @@ module "nginx-mig" {
target_size = 3
instance_template = module.nginx-template.template.self_link
update_policy = {
minimal_action = "REPLACE"
type = "PROACTIVE"
min_ready_sec = 30
minimal_action = "REPLACE"
type = "PROACTIVE"
min_ready_sec = 30
max_surge = {
fixed = 1
}
@ -393,8 +393,8 @@ module "nginx-mig" {
stateful_config = {
# name needs to match a MIG instance name
instance-1 = {
minimal_action = "NONE",
most_disruptive_allowed_action = "REPLACE"
minimal_action = "NONE",
most_disruptive_allowed_action = "REPLACE"
preserved_state = {
disks = {
persistent-disk-1 = {

View File

@ -110,7 +110,7 @@ module "simple-vm-example" {
}
}]
service_account_create = true
create_template = true
create_template = true
}
# tftest modules=1 resources=2
```
@ -131,8 +131,8 @@ module "kms-vm-example" {
}]
attached_disks = [
{
name = "attached-disk"
size = 10
name = "attached-disk"
size = 10
}
]
service_account_create = true
@ -176,9 +176,9 @@ This example shows how to enable [gVNIC](https://cloud.google.com/compute/docs/n
```hcl
resource "google_compute_image" "cos-gvnic" {
project = "my-project"
name = "my-image"
source_image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-89-16108-534-18"
project = "my-project"
name = "my-image"
source_image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-89-16108-534-18"
guest_os_features {
type = "GVNIC"
@ -200,8 +200,8 @@ module "vm-with-gvnic" {
zone = "europe-west1-b"
name = "test"
boot_disk = {
image = google_compute_image.cos-gvnic.self_link
type = "pd-ssd"
image = google_compute_image.cos-gvnic.self_link
type = "pd-ssd"
}
network_interfaces = [{
network = var.vpc.self_link

View File

@ -12,7 +12,7 @@ module "cmn-dc" {
source = "./fabric/modules/data-catalog-policy-tag"
name = "my-datacatalog-policy-tags"
project_id = "my-project"
tags = {
tags = {
low = null, medium = null, high = null
}
}
@ -26,10 +26,10 @@ module "cmn-dc" {
source = "./fabric/modules/data-catalog-policy-tag"
name = "my-datacatalog-policy-tags"
project_id = "my-project"
tags = {
low = null
tags = {
low = null
medium = null
high = {"roles/datacatalog.categoryFineGrainedReader" = ["group:GROUP_NAME@example.com"]}
high = { "roles/datacatalog.categoryFineGrainedReader" = ["group:GROUP_NAME@example.com"] }
}
iam = {
"roles/datacatalog.categoryAdmin" = ["group:GROUP_NAME@example.com"]

View File

@ -8,11 +8,11 @@ This module allows simple management of ['Google Data Fusion'](https://cloud.goo
```hcl
module "datafusion" {
source = "./fabric/modules/datafusion"
name = "my-datafusion"
region = "europe-west1"
project_id = "my-project"
network = "my-network-name"
source = "./fabric/modules/datafusion"
name = "my-datafusion"
region = "europe-west1"
project_id = "my-project"
network = "my-network-name"
# TODO: remove the following line
firewall_create = false
}

View File

@ -10,11 +10,11 @@ This module allows the creation and management of folders, including support for
module "folder" {
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
group_iam = {
name = "Folder name"
group_iam = {
"cloud-owners@example.org" = [
"roles/owner",
"roles/resourcemanager.projectCreator"
"roles/owner",
"roles/resourcemanager.projectCreator"
]
}
iam = {
@ -32,7 +32,7 @@ To manage organization policies, the `orgpolicy.googleapis.com` service should b
module "folder" {
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
name = "Folder name"
org_policies = {
"compute.disableGuestAttributesAccess" = {
enforce = true
@ -85,9 +85,9 @@ In the same way as for the [organization](../organization) module, the in-built
```hcl
module "folder" {
source = "./fabric/modules/folder"
source = "./fabric/modules/folder"
parent = "organizations/1234567890"
name = "Folder name"
name = "Folder name"
firewall_policy_factory = {
cidr_file = "configs/firewall-policies/cidrs.yaml"
policy_name = null
@ -250,8 +250,8 @@ module "org" {
organization_id = var.organization_id
tags = {
environment = {
description = "Environment specification."
iam = null
description = "Environment specification."
iam = null
values = {
dev = null
prod = null

View File

@ -62,7 +62,7 @@ module "bucket" {
source = "./fabric/modules/gcs"
project_id = "myproject"
prefix = "test"
name = "my-bucket"
name = "my-bucket"
iam = {
"roles/storage.admin" = ["group:storage@example.com"]

View File

@ -22,7 +22,7 @@ module "cluster-1" {
master_authorized_ranges = {
internal-vms = "10.0.0.0/8"
}
master_ipv4_cidr_block = "192.168.0.0/28"
master_ipv4_cidr_block = "192.168.0.0/28"
}
max_pods_per_node = 32
private_cluster_config = {
@ -54,7 +54,7 @@ module "cluster-1" {
master_authorized_ranges = {
internal-vms = "10.0.0.0/8"
}
master_ipv4_cidr_block = "192.168.0.0/28"
master_ipv4_cidr_block = "192.168.0.0/28"
}
private_cluster_config = {
enable_private_endpoint = true

View File

@ -56,7 +56,7 @@ module "cluster_1" {
master_authorized_ranges = {
fc1918_10_8 = "10.0.0.0/8"
}
master_ipv4_cidr_block = "192.168.0.0/28"
master_ipv4_cidr_block = "192.168.0.0/28"
}
enable_features = {
dataplane_v2 = true
@ -115,7 +115,7 @@ module "hub" {
}
}
configmanagement_clusters = {
"default" = [ "cluster-1" ]
"default" = ["cluster-1"]
}
}
@ -216,7 +216,7 @@ module "cluster_1" {
mgmt = "10.0.0.0/28"
pods-cluster-1 = "10.3.0.0/16"
}
master_ipv4_cidr_block = "192.168.1.0/28"
master_ipv4_cidr_block = "192.168.1.0/28"
}
private_cluster_config = {
enable_private_endpoint = false
@ -240,10 +240,10 @@ module "cluster_1_nodepool" {
}
module "cluster_2" {
source = "./fabric/modules/gke-cluster"
project_id = module.project.project_id
name = "cluster-2"
location = "europe-west4"
source = "./fabric/modules/gke-cluster"
project_id = module.project.project_id
name = "cluster-2"
location = "europe-west4"
vpc_config = {
network = module.vpc.self_link
subnetwork = module.vpc.subnet_self_links["europe-west4/subnet-cluster-2"]
@ -251,7 +251,7 @@ module "cluster_2" {
mgmt = "10.0.0.0/28"
pods-cluster-1 = "10.3.0.0/16"
}
master_ipv4_cidr_block = "192.168.2.0/28"
master_ipv4_cidr_block = "192.168.2.0/28"
}
private_cluster_config = {
enable_private_endpoint = false
@ -264,11 +264,11 @@ module "cluster_2" {
}
module "cluster_2_nodepool" {
source = "./fabric/modules/gke-nodepool"
project_id = module.project.project_id
cluster_name = module.cluster_2.name
location = "europe-west4"
name = "nodepool"
source = "./fabric/modules/gke-nodepool"
project_id = module.project.project_id
cluster_name = module.cluster_2.name
location = "europe-west4"
name = "nodepool"
node_count = { initial = 1 }
service_account = { create = true }
tags = ["cluster-2-node"]
@ -277,7 +277,7 @@ module "cluster_2_nodepool" {
module "hub" {
source = "./fabric/modules/gke-hub"
project_id = module.project.project_id
clusters = {
clusters = {
cluster-1 = module.cluster_1.id
cluster-2 = module.cluster_2.id
}

View File

@ -10,11 +10,11 @@ If no specific node configuration is set via variables, the module uses the prov
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
}
# tftest modules=1 resources=1
```
@ -31,11 +31,11 @@ To use the GCE default service account, you can ignore the variable which is equ
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
}
# tftest modules=1 resources=1
```
@ -46,11 +46,11 @@ To use an existing service account, pass in just the `email` attribute.
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
service_account = {
email = "foo-bar@myproject.iam.gserviceaccount.com"
}
@ -64,11 +64,11 @@ To have the module create a service account, set the `create` attribute to `true
```hcl
module "cluster-1-nodepool-1" {
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
source = "./fabric/modules/gke-nodepool"
project_id = "myproject"
cluster_name = "cluster-1"
location = "europe-west1-b"
name = "nodepool-1"
service_account = {
create = true
# optional

View File

@ -8,12 +8,12 @@ Note that this module does not fully comply with our design principles, as outpu
```hcl
module "myproject-default-service-accounts" {
source = "./fabric/modules/iam-service-account"
project_id = "myproject"
name = "vm-default"
generate_key = true
source = "./fabric/modules/iam-service-account"
project_id = "myproject"
name = "vm-default"
generate_key = true
# authoritative roles granted *on* the service accounts to other identities
iam = {
iam = {
"roles/iam.serviceAccountUser" = ["user:foo@example.com"]
}
# non-authoritative roles granted *to* the service accounts on other resources

View File

@ -14,9 +14,9 @@ In this module **no lifecycle blocks are set on resources to prevent destroy**,
```hcl
module "kms" {
source = "./fabric/modules/kms"
project_id = "my-project"
iam = {
source = "./fabric/modules/kms"
project_id = "my-project"
iam = {
"roles/cloudkms.admin" = ["user:user1@example.com"]
}
keyring = { location = "europe-west1", name = "test" }
@ -63,8 +63,8 @@ module "kms" {
```hcl
module "kms" {
source = "./fabric/modules/kms"
project_id = "my-project"
source = "./fabric/modules/kms"
project_id = "my-project"
key_purpose = {
key-c = {
purpose = "ASYMMETRIC_SIGN"
@ -74,8 +74,8 @@ module "kms" {
}
}
}
keyring = { location = "europe-west1", name = "test" }
keys = { key-a = null, key-b = null, key-c = null }
keyring = { location = "europe-west1", name = "test" }
keys = { key-a = null, key-b = null, key-c = null }
}
# tftest modules=1 resources=4
```

View File

@ -27,12 +27,12 @@ module "addresses" {
project_id = var.project_id
internal_addresses = {
ilb-1 = {
purpose = "SHARED_LOADBALANCER_VIP"
purpose = "SHARED_LOADBALANCER_VIP"
region = var.region
subnetwork = var.subnet.self_link
}
ilb-2 = {
address = "10.0.0.2"
address = "10.0.0.2"
region = var.region
subnetwork = var.subnet.self_link
}
@ -66,11 +66,11 @@ module "addresses" {
project_id = var.project_id
psc_addresses = {
one = {
address = null
address = null
network = var.vpc.self_link
}
two = {
address = "10.0.0.32"
address = "10.0.0.32"
network = var.vpc.self_link
}
}

View File

@ -117,7 +117,7 @@ The module uses a classic Global Load Balancer by default. To use the non-classi
```hcl
module "glb-0" {
source = "./fabric/modules/net-glb"
source = "./fabric/modules/net-glb"
project_id = "myprj"
name = "glb-test-0"
use_classic_version = false
@ -320,8 +320,8 @@ module "glb-0" {
neg_configs = {
neg-0 = {
hybrid = {
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
endpoints = [{
ip_address = "10.0.0.10"
port = 80
@ -355,10 +355,10 @@ module "glb-0" {
neg_configs = {
neg-0 = {
internet = {
use_fqdn = true
use_fqdn = true
endpoints = [{
destination = "www.example.org"
port = 80
port = 80
}]
}
}
@ -373,7 +373,7 @@ The module supports managing PSC NEGs if the non-classic version of the load bal
```hcl
module "glb-0" {
source = "./fabric/modules/net-glb"
source = "./fabric/modules/net-glb"
project_id = "myprj"
name = "glb-test-0"
use_classic_version = false
@ -390,7 +390,7 @@ module "glb-0" {
neg_configs = {
neg-0 = {
psc = {
region = "europe-west8"
region = "europe-west8"
target_service = "europe-west8-cloudkms.googleapis.com"
}
}
@ -465,7 +465,7 @@ module "glb-0" {
pathmap = {
default_service = "default"
path_rules = [{
paths = ["/other", "/other/*"]
paths = ["/other", "/other/*"]
service = "other"
}]
}
@ -554,16 +554,16 @@ module "glb-0" {
neg-gce-0 = {
backends = [{
balancing_mode = "RATE"
backend = "neg-ew8-c"
backend = "neg-ew8-c"
max_rate = { per_endpoint = 10 }
}]
}
neg-hybrid-0 = {
backends = [{
backend = "neg-hello"
backend = "neg-hello"
}]
health_checks = ["neg"]
protocol = "HTTPS"
health_checks = ["neg"]
protocol = "HTTPS"
}
}
group_configs = {
@ -600,7 +600,7 @@ module "glb-0" {
gce = {
network = "projects/myprj-host/global/networks/svpc"
subnetwork = "projects/myprj-host/regions/europe-west8/subnetworks/gce"
zone = "europe-west8-c"
zone = "europe-west8-c"
endpoints = [{
instance = "nginx-ew8-c"
ip_address = "10.24.32.26"
@ -610,8 +610,8 @@ module "glb-0" {
}
neg-hello = {
hybrid = {
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
network = "projects/myprj-host/global/networks/svpc"
zone = "europe-west8-b"
endpoints = [{
ip_address = "192.168.0.3"
port = 443

View File

@ -176,7 +176,7 @@ module "ilb-l7" {
backend_service_configs = {
default = {
port_name = "http"
backends = [
backends = [
{ group = "default" }
]
}
@ -237,7 +237,7 @@ module "ilb-l7" {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
group = "my-neg"
max_rate = { per_endpoint = 1 }
}]
}
@ -245,11 +245,11 @@ module "ilb-l7" {
neg_configs = {
my-neg = {
gce = {
zone = "europe-west1-b"
zone = "europe-west1-b"
endpoints = [{
instance = "test-1"
ip_address = "10.0.0.10"
port = 80
port = 80
}]
}
}
@ -274,7 +274,7 @@ module "ilb-l7" {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
group = "my-neg"
max_rate = { per_endpoint = 1 }
}]
}
@ -282,10 +282,10 @@ module "ilb-l7" {
neg_configs = {
my-neg = {
hybrid = {
zone = "europe-west1-b"
zone = "europe-west1-b"
endpoints = [{
ip_address = "10.0.0.10"
port = 80
port = 80
}]
}
}
@ -310,7 +310,7 @@ module "ilb-l7" {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
group = "my-neg"
max_rate = { per_endpoint = 1 }
}]
}
@ -367,7 +367,7 @@ module "ilb-l7" {
pathmap = {
default_service = "default"
path_rules = [{
paths = ["/video", "/video/*"]
paths = ["/video", "/video/*"]
service = "video"
}]
}
@ -521,7 +521,7 @@ module "ilb-l7" {
}
neg-home-hello = {
hybrid = {
zone = "europe-west8-b"
zone = "europe-west8-b"
endpoints = [{
ip_address = "192.168.0.3"
port = 443

View File

@ -37,7 +37,7 @@ module "ilb" {
}
}
backends = [{
group = module.ilb.groups.my-group.self_link
group = module.ilb.groups.my-group.self_link
}]
health_check_config = {
http = {
@ -96,7 +96,7 @@ module "ilb" {
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
ports = [80]
backends = [
for z, mod in module.instance-group : {

View File

@ -44,11 +44,11 @@ module "firewall" {
default_rules_config = {
admin_ranges = ["10.0.0.0/8"]
}
egress_rules = {
egress_rules = {
# implicit `deny` action
allow-egress-rfc1918 = {
description = "Allow egress to RFC 1918 ranges."
destination_ranges = [
destination_ranges = [
"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"
]
# implicit { protocol = "all" } rule
@ -108,7 +108,7 @@ module "firewall" {
project_id = "my-project"
network = "my-network"
default_rules_config = {
ssh_ranges = []
ssh_ranges = []
}
}
# tftest modules=1 resources=2
@ -134,9 +134,9 @@ The module includes a rules factory (see [Resource Factories](../../blueprints/f
```hcl
module "firewall" {
source = "./fabric/modules/net-vpc-firewall"
project_id = "my-project"
network = "my-network"
source = "./fabric/modules/net-vpc-firewall"
project_id = "my-project"
network = "my-network"
factories_config = {
rules_folder = "configs/firewall/rules"
cidr_tpl_file = "configs/firewall/cidrs.yaml"

View File

@ -45,9 +45,9 @@ module "vpc-hub" {
project_id = "hub"
name = "vpc-hub"
subnets = [{
ip_cidr_range = "10.0.0.0/24"
name = "subnet-1"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "subnet-1"
region = "europe-west1"
}]
}
@ -56,9 +56,9 @@ module "vpc-spoke-1" {
project_id = "spoke1"
name = "vpc-spoke1"
subnets = [{
ip_cidr_range = "10.0.1.0/24"
name = "subnet-2"
region = "europe-west1"
ip_cidr_range = "10.0.1.0/24"
name = "subnet-2"
region = "europe-west1"
}]
peering_config = {
peer_vpc_self_link = module.vpc-hub.self_link
@ -75,8 +75,8 @@ module "vpc-spoke-1" {
```hcl
locals {
service_project_1 = {
project_id = "project1"
gke_service_account = "gke"
project_id = "project1"
gke_service_account = "gke"
cloud_services_service_account = "cloudsvc"
}
service_project_2 = {
@ -128,9 +128,9 @@ module "vpc" {
name = "my-network"
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
}
]
psa_config = {
@ -151,13 +151,13 @@ module "vpc" {
name = "my-network"
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
}
]
psa_config = {
ranges = { myrange = "10.0.1.0/24" }
ranges = { myrange = "10.0.1.0/24" }
export_routes = true
import_routes = true
}
@ -205,7 +205,7 @@ module "vpc" {
project_id = "my-project"
name = "my-network"
dns_policy = {
inbound = true
inbound = true
outbound = {
private_ns = ["10.0.0.1"]
public_ns = ["8.8.8.8"]
@ -213,9 +213,9 @@ module "vpc" {
}
subnets = [
{
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
ip_cidr_range = "10.0.0.0/24"
name = "production"
region = "europe-west1"
}
]
}

View File

@ -23,11 +23,11 @@ module "vm" {
module "vpn-dynamic" {
source = "./fabric/modules/net-vpn-dynamic"
project_id = "my-project"
region = "europe-west1"
network = var.vpc.name
name = "gateway-1"
source = "./fabric/modules/net-vpn-dynamic"
project_id = "my-project"
region = "europe-west1"
network = var.vpc.name
name = "gateway-1"
router_config = {
asn = 64514
}

View File

@ -13,7 +13,7 @@ module "vpn-1" {
name = "net1-to-net-2"
peer_gateway = { gcp = module.vpn-2.self_link }
router_config = {
asn = 64514
asn = 64514
custom_advertise = {
all_subnets = true
ip_ranges = {
@ -48,7 +48,7 @@ module "vpn-2" {
network = var.vpc2.self_link
name = "net2-to-net1"
router_config = { asn = 64513 }
peer_gateway = { gcp = module.vpn-1.self_link}
peer_gateway = { gcp = module.vpn-1.self_link }
tunnels = {
remote-0 = {
bgp_peer = {

View File

@ -16,7 +16,7 @@ To manage organization policies, the `orgpolicy.googleapis.com` service should b
module "org" {
source = "./fabric/modules/organization"
organization_id = "organizations/1234567890"
group_iam = {
group_iam = {
"cloud-owners@example.org" = ["roles/owner", "roles/projectCreator"]
}
iam = {
@ -96,7 +96,7 @@ To manage organization policy custom constraints, the `orgpolicy.googleapis.com`
module "org" {
source = "./fabric/modules/organization"
organization_id = var.organization_id
org_policy_custom_constraints = {
"custom.gkeEnableAutoUpgrade" = {
resource_types = ["container.googleapis.com/NodePool"]
@ -126,8 +126,8 @@ The example below deploys a few org policy custom constraints split between two
```hcl
module "org" {
source = "./fabric/modules/organization"
organization_id = var.organization_id
source = "./fabric/modules/organization"
organization_id = var.organization_id
org_policy_custom_constraints_data_path = "configs/custom-constraints"
org_policies = {
"custom.gkeEnableAutoUpgrade" = {
@ -333,7 +333,7 @@ module "org" {
debug = {
destination = module.bucket.id
filter = "severity=DEBUG"
exclusions = {
exclusions = {
no-compute = "logName:compute"
}
type = "logging"
@ -374,12 +374,12 @@ module "org" {
organization_id = var.organization_id
tags = {
environment = {
description = "Environment specification."
iam = {
description = "Environment specification."
iam = {
"roles/resourcemanager.tagAdmin" = ["group:admins@example.com"]
}
values = {
dev = {}
dev = {}
prod = {
description = "Environment: production."
iam = {
@ -405,13 +405,13 @@ module "org" {
organization_id = var.organization_id
network_tags = {
net-environment = {
description = "This is a network tag."
network = "my_project/my_vpc"
iam = {
description = "This is a network tag."
network = "my_project/my_vpc"
iam = {
"roles/resourcemanager.tagAdmin" = ["group:admins@example.com"]
}
values = {
dev = null
dev = null
prod = {
description = "Environment: production."
iam = {

View File

@ -26,7 +26,7 @@ module "project" {
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
services = [
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
@ -48,7 +48,7 @@ module "project" {
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
services = [
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
@ -70,17 +70,17 @@ Additive IAM is typically used where bindings for specific roles are controlled
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
iam_additive = {
"roles/viewer" = [
"roles/viewer" = [
"group:one@example.org",
"group:two@xample.org"
],
"roles/storage.objectAdmin" = [
"roles/storage.objectAdmin" = [
"group:two@example.org"
],
"roles/owner" = [
"roles/owner" = [
"group:three@example.org"
],
}
@ -94,15 +94,15 @@ As mentioned above, there are cases where authoritative management of specific I
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
group_iam = {
"foo@example.com" = [
"roles/editor"
]
}
iam = {
"roles/editor" = [
"roles/editor" = [
"serviceAccount:${module.project.service_accounts.cloud_services}"
]
}
@ -120,8 +120,8 @@ You can enable Shared VPC Host at the project level and manage project service a
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
shared_vpc_host_config = {
enabled = true
}
@ -133,16 +133,16 @@ module "project" {
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
shared_vpc_service_config = {
attach = true
host_project = "my-host-project"
attach = true
host_project = "my-host-project"
service_identity_iam = {
"roles/compute.networkUser" = [
"roles/compute.networkUser" = [
"cloudservices", "container-engine"
]
"roles/vpcaccess.user" = [
"roles/vpcaccess.user" = [
"cloudrun"
]
"roles/container.hostServiceAgentUser" = [
@ -165,7 +165,7 @@ module "project" {
name = "project-example"
parent = "folders/1234567890"
prefix = "foo"
services = [
services = [
"container.googleapis.com",
"stackdriver.googleapis.com"
]
@ -409,8 +409,8 @@ module "org" {
organization_id = var.organization_id
tags = {
environment = {
description = "Environment specification."
iam = null
description = "Environment specification."
iam = null
values = {
dev = null
prod = null
@ -438,8 +438,8 @@ One non-obvious output is `service_accounts`, which offers a simple way to disco
```hcl
module "project" {
source = "./fabric/modules/project"
name = "project-example"
source = "./fabric/modules/project"
name = "project-example"
services = [
"compute.googleapis.com"
]

View File

@ -28,7 +28,7 @@ module "topic_with_schema" {
name = "my-topic"
schema = {
msg_encoding = "JSON"
schema_type = "AVRO"
schema_type = "AVRO"
definition = jsonencode({
"type" = "record",
"name" = "Avro",

View File

@ -16,7 +16,7 @@ The secret replication policy is automatically managed if no location is set, or
module "secret-manager" {
source = "./fabric/modules/secret-manager"
project_id = "my-project"
secrets = {
secrets = {
test-auto = null
test-manual = ["europe-west1", "europe-west4"]
}
@ -32,12 +32,12 @@ IAM bindings can be set per secret in the same way as for most other modules sup
module "secret-manager" {
source = "./fabric/modules/secret-manager"
project_id = "my-project"
secrets = {
secrets = {
test-auto = null
test-manual = ["europe-west1", "europe-west4"]
}
iam = {
test-auto = {
test-auto = {
"roles/secretmanager.secretAccessor" = ["group:auto-readers@example.com"]
}
test-manual = {
@ -56,7 +56,7 @@ As mentioned above, please be aware that **version data will be stored in state
module "secret-manager" {
source = "./fabric/modules/secret-manager"
project_id = "my-project"
secrets = {
secrets = {
test-auto = null
test-manual = ["europe-west1", "europe-west4"]
}

View File

@ -11,10 +11,10 @@ It can be used in conjunction with the [DNS](../dns) module to create [service-d
```hcl
module "service-directory" {
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
iam = {
"roles/servicedirectory.editor" = [
"serviceAccount:namespace-editor@example.com"
@ -28,10 +28,10 @@ module "service-directory" {
```hcl
module "service-directory" {
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "sd-1"
services = {
one = {
endpoints = ["first", "second"]
@ -59,9 +59,9 @@ Wiring a service directory namespace to a private DNS zone allows querying the n
```hcl
module "service-directory" {
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
source = "./fabric/modules/service-directory"
project_id = "my-project"
location = "europe-west1"
name = "apps"
iam = {
"roles/servicedirectory.editor" = [

View File

@ -27,16 +27,16 @@ module "repo" {
name = "my-repo"
triggers = {
foo = {
filename = "ci/workflow-foo.yaml"
included_files = ["**/*tf"]
filename = "ci/workflow-foo.yaml"
included_files = ["**/*tf"]
service_account = null
substitutions = {
BAR = 1
}
template = {
branch_name = "main"
project_id = null
tag_name = null
project_id = null
tag_name = null
}
}
}

View File

@ -120,7 +120,7 @@ module "test" {
to = {
operations = [{
method_selectors = ["*"]
service_name = "storage.googleapis.com"
service_name = "storage.googleapis.com"
}]
resources = ["projects/123456789"]
}

View File

@ -24,9 +24,9 @@ BLUEPRINTS_PATH = FABRIC_ROOT / 'blueprints/'
MODULES_PATH = FABRIC_ROOT / 'modules/'
SUBMODULES_PATH = MODULES_PATH / 'cloud-config-container'
FILE_TEST_RE = re.compile(r'# tftest-file id=(\w+) path=([\S]+)')
FILE_TEST_RE = re.compile(r'# tftest-file +id=(\w+) +path=([\S]+)')
Example = collections.namedtuple('Example', 'code module files')
Example = collections.namedtuple('Example', 'name code module files')
File = collections.namedtuple('File', 'path content')
@ -71,11 +71,11 @@ def pytest_generate_tests(metafunc):
continue
if child.lang == 'hcl':
path = module.relative_to(FABRIC_ROOT)
examples.append(Example(code, path, files[last_header]))
name = f'{path}:{last_header}'
if index > 1:
name += f' {index}'
ids.append(name)
examples.append(Example(name, code, path, files[last_header]))
elif isinstance(child, marko.block.Heading):
last_header = child.children[0].children
index = 0

View File

@ -13,6 +13,7 @@
# limitations under the License.
import re
import subprocess
from pathlib import Path
BASE_PATH = Path(__file__).parent
@ -52,5 +53,12 @@ def test_example(plan_validator, tmp_path, example):
assert expected_modules == num_modules, 'wrong number of modules'
assert expected_resources == num_resources, 'wrong number of resources'
# TODO(jccb): this should probably be done in check_documentation
# but we already have all the data here.
result = subprocess.run(
'terraform fmt -check -diff -no-color main.tf'.split(), cwd=tmp_path,
stdout=subprocess.PIPE, encoding='utf-8')
assert result.returncode == 0, f'terraform code not formatted correctly\n{result.stdout}'
else:
assert False, "can't find tftest directive"