parent
60d579be4d
commit
87cd83f5c0
|
@ -981,7 +981,7 @@ tests:
|
|||
# run a test named `test-plan`, load the specified tfvars files
|
||||
# use the default inventory file of `test-plan.yaml`
|
||||
test-plan:
|
||||
tfvars: # if ommited, we load test-plan.tfvars by default
|
||||
tfvars: # if omitted, we load test-plan.tfvars by default
|
||||
- test-plan.tfvars
|
||||
- test-plan-extra.tfvars
|
||||
inventory:
|
||||
|
@ -991,7 +991,7 @@ tests:
|
|||
# extra_files:
|
||||
# - ../plugin-x/*.tf
|
||||
|
||||
# You can ommit the tfvars and inventory sections and they will
|
||||
# You can omit the tfvars and inventory sections and they will
|
||||
# default to the name of the test. The following two examples are equivalent:
|
||||
#
|
||||
# test-plan2:
|
||||
|
|
|
@ -6,7 +6,7 @@ The architecture is the one depicted below.
|
|||
|
||||
![Diagram](diagram.png)
|
||||
|
||||
To emulate an service deployed on-premise, we have used a managed instance group of instances running Nginx exposed via a regional internalload balancer (L7). The service is accesible through VPN.
|
||||
To emulate an service deployed on-premise, we have used a managed instance group of instances running Nginx exposed via a regional internalload balancer (L7). The service is accessible through VPN.
|
||||
|
||||
## Running the blueprint
|
||||
|
||||
|
|
|
@ -46,7 +46,7 @@ variable "dashboard_json_path" {
|
|||
}
|
||||
|
||||
variable "discovery_config" {
|
||||
description = "Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empy, every project under the discovery root node will be monitored."
|
||||
description = "Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empty, every project under the discovery root node will be monitored."
|
||||
type = object({
|
||||
discovery_root = string
|
||||
monitored_folders = list(string)
|
||||
|
|
|
@ -28,7 +28,7 @@ Labels are set with project id (which may differ from the monitoring workspace p
|
|||
|
||||
<img src="explorer.png" width="640px" alt="GCP Metrics Explorer, usage, limit and utilization view sample">
|
||||
|
||||
The solution can also create a basic monitoring alert policy, to demonstrate how to raise alerts when quotas utilization goes over a predefined threshold, to enable it, set variable `alert_create` to true and reapply main.tf after main.py has run at least one and quota monitoring metrics have been creaed.
|
||||
The solution can also create a basic monitoring alert policy, to demonstrate how to raise alerts when quotas utilization goes over a predefined threshold, to enable it, set variable `alert_create` to true and reapply main.tf after main.py has run at least one and quota monitoring metrics have been created.
|
||||
|
||||
## Running the blueprint
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ module "sa-tfc" {
|
|||
|
||||
iam = {
|
||||
# We allow only tokens generated by a specific TFC workspace impersonation of the service account,
|
||||
# that way one identity pool can be used for a TFC Organization, but every workspace will be able to impersonate only a specifc SA
|
||||
# that way one identity pool can be used for a TFC Organization, but every workspace will be able to impersonate only a specific SA
|
||||
"roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.tfc-pool.name}/attribute.terraform_workspace_id/${var.tfc_workspace_id}"]
|
||||
}
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ func getEnv(key, fallback string) string {
|
|||
return fallback
|
||||
}
|
||||
|
||||
// GetConfiguration generates configration by reading ENV variables.
|
||||
// GetConfiguration generates configuration by reading ENV variables.
|
||||
func GetConfiguration() (*Configuration, error) {
|
||||
timeout, err := time.ParseDuration(getEnv("TIMEOUT", "1000ms"))
|
||||
if err != nil {
|
||||
|
|
|
@ -27,7 +27,7 @@ This sample creates\updates several distinct groups of resources:
|
|||
|---|---|:---:|:---:|:---:|
|
||||
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
||||
| [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | <code>list(string)</code> | ✓ | |
|
||||
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [project_create](variables.tf#L31) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
||||
| [project_name](variables.tf#L40) | Name of an existing project or of the new project assigned as M4CE host project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ variable "migration_target_projects" {
|
|||
}
|
||||
|
||||
variable "migration_viewer_users" {
|
||||
description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format."
|
||||
description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ This sample creates\update several distinct groups of resources:
|
|||
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
||||
| [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | <code>list(string)</code> | ✓ | |
|
||||
| [sharedvpc_host_projects](variables.tf#L45) | List of host projects that share a VPC with the selected target projects. | <code>list(string)</code> | ✓ | |
|
||||
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [project_create](variables.tf#L30) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
||||
| [project_name](variables.tf#L39) | Name of an existing project or of the new project assigned as M4CE host project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ variable "migration_target_projects" {
|
|||
}
|
||||
|
||||
variable "migration_viewer_users" {
|
||||
description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format."
|
||||
description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ This sample creates several distinct groups of resources:
|
|||
| name | description | type | required | default |
|
||||
|---|---|:---:|:---:|:---:|
|
||||
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
||||
| [migration_viewer_users](variables.tf#L20) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [migration_viewer_users](variables.tf#L20) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [project_create](variables.tf#L26) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
||||
| [project_name](variables.tf#L35) | Name of an existing project or of the new project assigned as M4CE host an target project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
||||
| [vpc_config](variables.tf#L41) | Parameters to create a simple VPC on the M4CE project. | <code title="object({ ip_cidr_range = string, region = string })">object({…})</code> | | <code title="{ ip_cidr_range = "10.200.0.0/20", region = "us-west2" }">{…}</code> |
|
||||
|
|
|
@ -18,7 +18,7 @@ variable "migration_admin_users" {
|
|||
}
|
||||
|
||||
variable "migration_viewer_users" {
|
||||
description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format."
|
||||
description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@ In this tutorial we will also see how to make explainable predictions, in order
|
|||
|
||||
# Dataset
|
||||
|
||||
This tutorial uses a fictitious e-commerce dataset collecting programmatically generated data from the fictitious e-commerce store called The Look. The dataset is publicy available on BigQuery at this location `bigquery-public-data.thelook_ecommerce`.
|
||||
This tutorial uses a fictitious e-commerce dataset collecting programmatically generated data from the fictitious e-commerce store called The Look. The dataset is publicly available on BigQuery at this location `bigquery-public-data.thelook_ecommerce`.
|
||||
|
||||
# Goal
|
||||
|
||||
|
|
|
@ -354,7 +354,7 @@
|
|||
"outputs": [],
|
||||
"source": [
|
||||
"# deploy the BigQuery ML model on Vertex Endpoint\n",
|
||||
"# have a coffe - this step can take up 10/15 minutes to finish\n",
|
||||
"# have a coffee - this step can take up 10/15 minutes to finish\n",
|
||||
"model.deploy(endpoint=endpoint, deployed_model_display_name='bqml-deployed-model')"
|
||||
]
|
||||
},
|
||||
|
@ -436,7 +436,7 @@
|
|||
"\n",
|
||||
"Thanks to this tutorial we were able to:\n",
|
||||
"- Define a re-usable Vertex AI pipeline to train and evaluate BQ ML models\n",
|
||||
"- Use a Vertex AI Experiment to keep track of multiple trainings for the same model with different paramenters (in this case a different split for train/test data)\n",
|
||||
"- Use a Vertex AI Experiment to keep track of multiple trainings for the same model with different parameters (in this case a different split for train/test data)\n",
|
||||
"- Deploy the preferred model on a Vertex AI managed Endpoint in order to serve the model for real-time use cases via API\n",
|
||||
"- Make batch prediction via Big Query and see what are the top 5 features which influenced the algorithm output"
|
||||
]
|
||||
|
|
|
@ -60,7 +60,7 @@ Once you have the required information, head back to the cloud shell editor. Mak
|
|||
|
||||
Configure the Terraform variables in your `terraform.tfvars` file. You need to specify at least the `project_id` and `prefix` variables. See [`terraform.tfvars.sample`](terraform.tfvars.sample) as starting point.
|
||||
|
||||
![Deploy ressources](images/image2.png)
|
||||
![Deploy resources](images/image2.png)
|
||||
|
||||
Run Terraform init:
|
||||
|
||||
|
@ -71,7 +71,7 @@ terraform apply
|
|||
|
||||
The resource creation will take a few minutes, at the end this is the output you should expect for successful completion along with a list of the created resources:
|
||||
|
||||
![Ressources installed](images/image3.png)
|
||||
![Resources installed](images/image3.png)
|
||||
|
||||
## Move to real use case consideration
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# tfdoc:file:description Trasformation project and VPC.
|
||||
# tfdoc:file:description Transformation project and VPC.
|
||||
|
||||
locals {
|
||||
iam_trf = {
|
||||
|
|
|
@ -22,7 +22,7 @@ import argparse
|
|||
|
||||
class ParseRow(beam.DoFn):
|
||||
"""
|
||||
Splits a given csv row by a seperator, validates fields and returns a dict
|
||||
Splits a given csv row by a separator, validates fields and returns a dict
|
||||
structure compatible with the BigQuery transform
|
||||
"""
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ As is often the case in real-world configurations, this blueprint accepts as inp
|
|||
|
||||
If the network_config variable is not provided, one VPC will be created in each project that supports network resources (load, transformation and orchestration).
|
||||
|
||||
## Deploy your enviroment
|
||||
## Deploy your environment
|
||||
|
||||
We assume the identiy running the following steps has the following role:
|
||||
|
||||
|
@ -35,7 +35,7 @@ Run Terraform init:
|
|||
terraform init
|
||||
```
|
||||
|
||||
Configure the Terraform variable in your terraform.tfvars file. You need to spefify at least the following variables:
|
||||
Configure the Terraform variable in your terraform.tfvars file. You need to specify at least the following variables:
|
||||
|
||||
```
|
||||
prefix = "prefix"
|
||||
|
@ -48,7 +48,7 @@ You can run now:
|
|||
terraform apply
|
||||
```
|
||||
|
||||
You can now connect to the Vertex AI notbook to perform your data analysy.
|
||||
You can now connect to the Vertex AI notbook to perform your data analysis.
|
||||
<!-- BEGIN TFDOC -->
|
||||
|
||||
## Variables
|
||||
|
|
|
@ -58,7 +58,7 @@ variable "region" {
|
|||
default = "europe-west1"
|
||||
}
|
||||
|
||||
variable "service_encryption_keys" { # service encription key
|
||||
variable "service_encryption_keys" { # service encryption key
|
||||
description = "Cloud KMS to use to encrypt different services. Key location should match service region."
|
||||
type = object({
|
||||
bq = string
|
||||
|
|
|
@ -112,7 +112,7 @@ The Shielded Folder blueprint is meant to be executed by a Service Account (or a
|
|||
- `roles/resourcemanager.folderAdmin`
|
||||
- `roles/resourcemanager.projectCreator`
|
||||
|
||||
The shielded Folfer blueprint assumes [groups described](#user-groups) are created in your GCP organization.
|
||||
The shielded Folder blueprint assumes [groups described](#user-groups) are created in your GCP organization.
|
||||
|
||||
### Variable configuration PIPPO
|
||||
|
||||
|
|
|
@ -188,7 +188,7 @@ variable "vpc_sc_access_levels" {
|
|||
}
|
||||
|
||||
variable "vpc_sc_egress_policies" {
|
||||
description = "VPC SC egress policy defnitions."
|
||||
description = "VPC SC egress policy definitions."
|
||||
type = map(object({
|
||||
from = object({
|
||||
identity_type = optional(string, "ANY_IDENTITY")
|
||||
|
@ -208,7 +208,7 @@ variable "vpc_sc_egress_policies" {
|
|||
}
|
||||
|
||||
variable "vpc_sc_ingress_policies" {
|
||||
description = "VPC SC ingress policy defnitions."
|
||||
description = "VPC SC ingress policy definitions."
|
||||
type = map(object({
|
||||
from = object({
|
||||
access_levels = optional(list(string), [])
|
||||
|
|
|
@ -36,7 +36,7 @@ Assign roles relying on User groups is a way to decouple the final set of permis
|
|||
|
||||
We use the following groups to control access to resources:
|
||||
|
||||
- *Data Scientits* (gcp-ml-ds@<company.org>). They manage notebooks and create ML pipelines.
|
||||
- *Data Scientist* (gcp-ml-ds@<company.org>). They manage notebooks and create ML pipelines.
|
||||
- *ML Engineers* (gcp-ml-eng@<company.org>). They manage the different Vertex resources.
|
||||
- *ML Viewer* (gcp-ml-eng@<company.org>). Group with wiewer permission for the different resources.
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ spec:
|
|||
interfaces:
|
||||
variables:
|
||||
- name: notebooks
|
||||
description: Vertex AI workbenchs to be deployed. Service Account runtime/instances deployed.
|
||||
description: Vertex AI workbenches to be deployed. Service Account runtime/instances deployed.
|
||||
type: |-
|
||||
map(object({
|
||||
type = string
|
||||
|
|
|
@ -67,7 +67,7 @@ variable "network_config" {
|
|||
}
|
||||
|
||||
variable "notebooks" {
|
||||
description = "Vertex AI workbenchs to be deployed. Service Account runtime/instances deployed."
|
||||
description = "Vertex AI workbenches to be deployed. Service Account runtime/instances deployed."
|
||||
type = map(object({
|
||||
type = string
|
||||
machine_type = optional(string, "n1-standard-4")
|
||||
|
|
|
@ -10,7 +10,7 @@ You can create as many files as you like, the code will loop through it and crea
|
|||
|
||||
### Terraform code
|
||||
|
||||
In this section we show how to create tables and views from a file structure simlar to the one shown below.
|
||||
In this section we show how to create tables and views from a file structure similar to the one shown below.
|
||||
```bash
|
||||
bigquery
|
||||
│
|
||||
|
|
|
@ -253,7 +253,7 @@ module "gke" {
|
|||
| [clusters](variables.tf#L22) | Clusters configuration. Refer to the gke-cluster module for type details. | <code title="map(object({ cluster_autoscaling = optional(any) description = optional(string) enable_addons = optional(any, { horizontal_pod_autoscaling = true, http_load_balancing = true }) enable_features = optional(any, { workload_identity = true }) issue_client_certificate = optional(bool, false) labels = optional(map(string)) location = string logging_config = optional(list(string), ["SYSTEM_COMPONENTS"]) maintenance_config = optional(any, { daily_window_start_time = "03:00" recurring_window = null maintenance_exclusion = [] }) max_pods_per_node = optional(number, 110) min_master_version = optional(string) monitoring_config = optional(object({ enable_components = optional(list(string), ["SYSTEM_COMPONENTS"]) managed_prometheus = optional(bool) })) node_locations = optional(list(string)) private_cluster_config = optional(any) release_channel = optional(string) vpc_config = object({ subnetwork = string network = optional(string) secondary_range_blocks = optional(object({ pods = string services = string })) secondary_range_names = optional(object({ pods = string services = string }), { pods = "pods", services = "services" }) master_authorized_ranges = optional(map(string)) master_ipv4_cidr_block = optional(string) }) }))">map(object({…}))</code> | | <code>{}</code> |
|
||||
| [fleet_configmanagement_clusters](variables.tf#L70) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | <code>map(list(string))</code> | | <code>{}</code> |
|
||||
| [fleet_configmanagement_templates](variables.tf#L77) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | <code title="map(object({ binauthz = bool config_sync = object({ git = object({ gcp_service_account_email = string https_proxy = string policy_dir = string secret_type = string sync_branch = string sync_repo = string sync_rev = string sync_wait_secs = number }) prevent_drift = string source_format = string }) hierarchy_controller = object({ enable_hierarchical_resource_quota = bool enable_pod_tree_labels = bool }) policy_controller = object({ audit_interval_seconds = number exemptable_namespaces = list(string) log_denies_enabled = bool referential_rules_enabled = bool template_library_installed = bool }) version = string }))">map(object({…}))</code> | | <code>{}</code> |
|
||||
| [fleet_features](variables.tf#L112) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | <code title="object({ appdevexperience = bool configmanagement = bool identityservice = bool multiclusteringress = string multiclusterservicediscovery = bool servicemesh = bool })">object({…})</code> | | <code>null</code> |
|
||||
| [fleet_features](variables.tf#L112) | Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | <code title="object({ appdevexperience = bool configmanagement = bool identityservice = bool multiclusteringress = string multiclusterservicediscovery = bool servicemesh = bool })">object({…})</code> | | <code>null</code> |
|
||||
| [fleet_workload_identity](variables.tf#L125) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | <code>bool</code> | | <code>false</code> |
|
||||
| [group_iam](variables.tf#L137) | Project-level IAM bindings for groups. Use group emails as keys, list of roles as values. | <code>map(list(string))</code> | | <code>{}</code> |
|
||||
| [iam](variables.tf#L144) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | <code>map(list(string))</code> | | <code>{}</code> |
|
||||
|
|
|
@ -110,7 +110,7 @@ variable "fleet_configmanagement_templates" {
|
|||
}
|
||||
|
||||
variable "fleet_features" {
|
||||
description = "Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used."
|
||||
description = "Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used."
|
||||
type = object({
|
||||
appdevexperience = bool
|
||||
configmanagement = bool
|
||||
|
|
|
@ -41,7 +41,7 @@ http_access deny !safe_ports
|
|||
# deny CONNECT if connection is not using ssl
|
||||
http_access deny CONNECT !ssl_ports
|
||||
|
||||
# deny acccess to cachemgr
|
||||
# deny access to cachemgr
|
||||
http_access deny manager
|
||||
|
||||
# deny access to localhost through the proxy
|
||||
|
|
|
@ -43,7 +43,7 @@ Before applying this Terraform
|
|||
| [dest_ip_address](variables.tf#L17) | On-prem service destination IP address. | <code>string</code> | ✓ | |
|
||||
| [prefix](variables.tf#L28) | Prefix used for resource names. | <code>string</code> | ✓ | |
|
||||
| [producer](variables.tf#L37) | Producer configuration. | <code title="object({ subnet_main = string # CIDR subnet_proxy = string # CIDR subnet_psc = string # CIDR accepted_limits = map(number) # Accepted project ids => PSC endpoint limit })">object({…})</code> | ✓ | |
|
||||
| [project_id](variables.tf#L53) | When referncing existing projects, the id of the project where resources will be created. | <code>string</code> | ✓ | |
|
||||
| [project_id](variables.tf#L53) | When referencing existing projects, the id of the project where resources will be created. | <code>string</code> | ✓ | |
|
||||
| [region](variables.tf#L58) | Region where resources will be created. | <code>string</code> | ✓ | |
|
||||
| [subnet_consumer](variables.tf#L63) | Consumer subnet CIDR. | <code>string # CIDR</code> | ✓ | |
|
||||
| [zone](variables.tf#L102) | Zone where resources will be created. | <code>string</code> | ✓ | |
|
||||
|
|
|
@ -51,7 +51,7 @@ variable "project_create" {
|
|||
}
|
||||
|
||||
variable "project_id" {
|
||||
description = "When referncing existing projects, the id of the project where resources will be created."
|
||||
description = "When referencing existing projects, the id of the project where resources will be created."
|
||||
type = string
|
||||
}
|
||||
|
||||
|
|
|
@ -122,11 +122,11 @@ Note the different PSC endpoints created in each project and the different IPs.
|
|||
|
||||
#### Use case 3.2
|
||||
|
||||
It is possible to block access from the Internet restoring `ingress_settigns` to `"internal"` but this will also block access from any other project. This feature is interesting, as will be shown in the next use case.
|
||||
It is possible to block access from the Internet restoring `ingress_settings` to `"internal"` but this will also block access from any other project. This feature is interesting, as will be shown in the next use case.
|
||||
|
||||
<p style="left"> <img src="images/use-case-3.2.png" width="800"> </p>
|
||||
|
||||
Simply omit `ingress_settigns` in `terraform.tfvars`:
|
||||
Simply omit `ingress_settings` in `terraform.tfvars`:
|
||||
|
||||
```tfvars
|
||||
prj_main_id = "[your-main-project-id]"
|
||||
|
@ -135,7 +135,7 @@ prj_prj1_id = "[your-project1-id]"
|
|||
|
||||
#### Use case 3.3
|
||||
|
||||
To allow access from other projects while keeping access from the Internet restricted, you need to add those projects to a VPC SC perimeter together with Cloud Run. Projects outisde the perimeter will be blocked. This way you can control which projects can gain access.
|
||||
To allow access from other projects while keeping access from the Internet restricted, you need to add those projects to a VPC SC perimeter together with Cloud Run. Projects outside the perimeter will be blocked. This way you can control which projects can gain access.
|
||||
|
||||
<p style="left"> <img src="images/use-case-3.3.png" width="800"> </p>
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ The blueprints in this folder show how to automate installation of specific thir
|
|||
|
||||
### OpenShift cluster bootstrap on Shared VPC
|
||||
|
||||
<a href="./openshift/" title="HubOpenShift boostrap example"><img src="./openshift/diagram.png" align="left" width="280px"></a> This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks.
|
||||
<a href="./openshift/" title="HubOpenShift bootstrap example"><img src="./openshift/diagram.png" align="left" width="280px"></a> This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks.
|
||||
|
||||
<br clear="left">
|
||||
|
||||
|
|
|
@ -139,7 +139,7 @@ Variable configuration is best done in a `.tfvars` file, but can also be done di
|
|||
<dt><code>disk_encryption_key</code></dt>
|
||||
<dd>Set to <code>null</code> if you are not using CMEK keys for disk encryption. If you are using it, ensure the GCE robot account has permissions on the key.</dd>
|
||||
<dt><code>fs_paths</code></dt>
|
||||
<dd>Filesystem paths for the external dependencies. Home path expansion is supported. The <code>config_dir</code> path is where generated ignition files will be created. Ensure it's empty (incuding hidden files) before starting the installation process.</dd>
|
||||
<dd>Filesystem paths for the external dependencies. Home path expansion is supported. The <code>config_dir</code> path is where generated ignition files will be created. Ensure it's empty (including hidden files) before starting the installation process.</dd>
|
||||
<dt><code>host_project</code></dt>
|
||||
<dd>If you don't need installing in different subnets, pass the same subnet names for the default, masters, and workers subnets.</dd>
|
||||
<dt><code>install_config_params</code></dt>
|
||||
|
|
|
@ -12,7 +12,7 @@ This example is a companion setup to the Python script in the parent folder, and
|
|||
| [fs_paths](variables.tf#L44) | Filesystem paths for commands and data, supports home path expansion. | <code title="object({ credentials = string config_dir = string openshift_install = string pull_secret = string ssh_key = string })">object({…})</code> | ✓ | |
|
||||
| [host_project](variables.tf#L55) | Shared VPC project and network configuration. | <code title="object({ default_subnet_name = string masters_subnet_name = string project_id = string vpc_name = string workers_subnet_name = string })">object({…})</code> | ✓ | |
|
||||
| [service_project](variables.tf#L125) | Service project configuration. | <code title="object({ project_id = string })">object({…})</code> | ✓ | |
|
||||
| [allowed_ranges](variables.tf#L17) | Ranges that can SSH to the boostrap VM and API endpoint. | <code>list(any)</code> | | <code>["10.0.0.0/8"]</code> |
|
||||
| [allowed_ranges](variables.tf#L17) | Ranges that can SSH to the bootstrap VM and API endpoint. | <code>list(any)</code> | | <code>["10.0.0.0/8"]</code> |
|
||||
| [disk_encryption_key](variables.tf#L28) | Optional CMEK for disk encryption. | <code title="object({ keyring = string location = string name = string project_id = string })">object({…})</code> | | <code>null</code> |
|
||||
| [install_config_params](variables.tf#L68) | OpenShift cluster configuration. | <code title="object({ disk_size = number labels = map(string) network = object({ cluster = string host_prefix = number machine = string service = string }) proxy = object({ http = string https = string noproxy = string }) })">object({…})</code> | | <code title="{ disk_size = 16 labels = {} network = { cluster = "10.128.0.0/14" host_prefix = 23 machine = "10.0.0.0/16" service = "172.30.0.0/16" } proxy = null }">{…}</code> |
|
||||
| [post_bootstrap_config](variables.tf#L103) | Name of the service account for the machine operator. Removes bootstrap resources when set. | <code title="object({ machine_op_sa_prefix = string })">object({…})</code> | | <code>null</code> |
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
*/
|
||||
|
||||
variable "allowed_ranges" {
|
||||
description = "Ranges that can SSH to the boostrap VM and API endpoint."
|
||||
description = "Ranges that can SSH to the bootstrap VM and API endpoint."
|
||||
type = list(any)
|
||||
default = ["10.0.0.0/8"]
|
||||
}
|
||||
|
|
|
@ -249,7 +249,7 @@ This second set of files is disabled by default, you can enable it by setting th
|
|||
outputs_location = "~/fast-config"
|
||||
```
|
||||
|
||||
Once the variable is set, `apply` will generate and manage providers and variables files, including the initial one used for this stage after the first run. You can then link these files in the relevant stages, instead of manually transfering outputs from one stage, to Terraform variables in another.
|
||||
Once the variable is set, `apply` will generate and manage providers and variables files, including the initial one used for this stage after the first run. You can then link these files in the relevant stages, instead of manually transferring outputs from one stage, to Terraform variables in another.
|
||||
|
||||
Below is the outline of the output files generated by all stages, which is identical for both the GCS and local filesystem copies:
|
||||
|
||||
|
|
|
@ -276,7 +276,7 @@ terraform apply
|
|||
|
||||
### Post-deployment activities
|
||||
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible.
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible.
|
||||
- On-prem routers should accept BGP sessions from their cloud peers.
|
||||
- On-prem DNS servers should have forward zones for GCP-managed ones.
|
||||
|
||||
|
|
|
@ -290,7 +290,7 @@ terraform apply
|
|||
|
||||
### Post-deployment activities
|
||||
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible.
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible.
|
||||
- On-prem routers should accept BGP sessions from their cloud peers.
|
||||
- On-prem DNS servers should have forward zones for GCP-managed ones.
|
||||
|
||||
|
@ -386,7 +386,7 @@ Copy `vpn-spoke-dev.tf` to `vpn-spoke-staging.tf` - replace `dev` with `staging`
|
|||
VPN configuration also controls BGP advertisements, which requires the following variable changes:
|
||||
|
||||
- `router_configs` to configure the new routers (one per region) created for the `staging` VPC
|
||||
- `vpn_onprem_configs` to configure the new advertisments to on-premises for the new CIDRs
|
||||
- `vpn_onprem_configs` to configure the new advertisements to on-premises for the new CIDRs
|
||||
- `vpn_spoke_configs` to configure the new advertisements to `landing` for the new VPC - new keys (one per region) should be added, such as e.g. `staging-ew1` and `staging-ew4`
|
||||
|
||||
DNS configurations are centralised in the `dns-*.tf` files. Spokes delegate DNS resolution to Landing through DNS peering, and optionally define a private zone (e.g. `dev.gcp.example.com`) which the landing peers to. To configure DNS for a new environment, copy one of the other environments DNS files [e.g. (dns-dev.tf)](dns-dev.tf) into a new `dns-*.tf` file suffixed with the environment name (e.g. `dns-staging.tf`), and update its content accordingly. Don't forget to add a peering zone from the landing to the newly created environment private zone.
|
||||
|
|
|
@ -358,7 +358,7 @@ terraform apply
|
|||
|
||||
### Post-deployment activities
|
||||
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible.
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible.
|
||||
- On-prem routers should accept BGP sessions from their cloud peers.
|
||||
- On-prem DNS servers should have forward zones for GCP-managed ones.
|
||||
|
||||
|
|
|
@ -238,7 +238,7 @@ terraform apply
|
|||
|
||||
### Post-deployment activities
|
||||
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible.
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible.
|
||||
- On-prem routers should accept BGP sessions from their cloud peers.
|
||||
- On-prem DNS servers should have forward zones for GCP-managed ones.
|
||||
|
||||
|
|
|
@ -84,7 +84,7 @@ The "landing zone" is divided into two VPC networks:
|
|||
|
||||
### NCC, NVAs and BGP sessions
|
||||
|
||||
The VPCs connect through two sets of sample NVA machines: one per region, each containing two instances. The appliances run [Contrainer-Optimized OS](https://cloud.google.com/container-optimized-os/docs) and a container with [FRRouting](https://frrouting.org/).
|
||||
The VPCs connect through two sets of sample NVA machines: one per region, each containing two instances. The appliances run [Container-Optimized OS](https://cloud.google.com/container-optimized-os/docs) and a container with [FRRouting](https://frrouting.org/).
|
||||
|
||||
We leverage NCC-RA to allow the NVAs to establish BGP sessions with Cloud Routers in the untrusted and in the trusted VPCs. This allows Cloud Routers to advertise routes to the NVAs, and the NVAs to announce routes to the Cloud Router, so it can program them in the VPC.
|
||||
|
||||
|
@ -92,7 +92,7 @@ Specifically, each NVA establishes two BGP sessions (for redundancy) with the th
|
|||
|
||||
**Cloud Routers in the untrusted VPC advertise the default route (0.0.0.0/0) to the NVAs**. The NVAs advertise the route to the Cloud Routers in the trusted VPC. These dynamic routes are then imported through VPC peerings in the spokes.
|
||||
|
||||
**Cloud Routers in the trusted hub advertis to the NVAs** all the subnets of the trusted VPCs. This includes the regional subnets and the cross-regional subnets. The NVAs manipulate the route costs (MED) before advertising them to the Cloud Routers in the untrusted VPC. This is done to guarantee symmetric traffic paths (more [here](https://medium.com/google-cloud/gcp-routing-adventures-vol-2-enterprise-multi-regional-deployments-in-google-cloud-3968e9591d59)).
|
||||
**Cloud Routers in the trusted hub advertise to the NVAs** all the subnets of the trusted VPCs. This includes the regional subnets and the cross-regional subnets. The NVAs manipulate the route costs (MED) before advertising them to the Cloud Routers in the untrusted VPC. This is done to guarantee symmetric traffic paths (more [here](https://medium.com/google-cloud/gcp-routing-adventures-vol-2-enterprise-multi-regional-deployments-in-google-cloud-3968e9591d59)).
|
||||
|
||||
NVAs establish **extra BGP sessions with both cross-regional NVAs**. In this case, the NVAs advertise the regional trusted routes only. This allows cross-spoke (environment) traffic to remain also symmetric (more [here](https://medium.com/google-cloud/gcp-routing-adventures-vol-2-enterprise-multi-regional-deployments-in-google-cloud-3968e9591d59)). We set these routes to be exchanged at a lower cost than the one set for the other routes.
|
||||
|
||||
|
@ -380,7 +380,7 @@ terraform apply
|
|||
|
||||
### Post-deployment activities
|
||||
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible.
|
||||
- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible.
|
||||
- On-prem routers should accept BGP sessions from their cloud peers.
|
||||
- On-prem DNS servers should have forward zones for GCP-managed ones.
|
||||
|
||||
|
|
|
@ -97,7 +97,7 @@ module "spokes-untrusted" {
|
|||
|
||||
custom_advertise = {
|
||||
all_subnets = false
|
||||
ip_ranges = { "0.0.0.0/0" = "Deafult route." }
|
||||
ip_ranges = { "0.0.0.0/0" = "Default route." }
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -311,8 +311,8 @@ Some references that might be useful in setting up this stage:
|
|||
| [kms_keys](variables.tf#L73) | KMS keys to create, keyed by name. Null attributes will be interpolated with defaults. | <code title="map(object({ iam = map(list(string)) labels = map(string) locations = list(string) rotation_period = string }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [outputs_location](variables.tf#L94) | Path where providers, tfvars files, and lists for the following stages are written. Leave empty to disable. | <code>string</code> | | <code>null</code> | |
|
||||
| [vpc_sc_access_levels](variables.tf#L122) | VPC SC access level definitions. | <code title="map(object({ combining_function = optional(string) conditions = optional(list(object({ device_policy = optional(object({ allowed_device_management_levels = optional(list(string)) allowed_encryption_statuses = optional(list(string)) require_admin_approval = bool require_corp_owned = bool require_screen_lock = optional(bool) os_constraints = optional(list(object({ os_type = string minimum_version = optional(string) require_verified_chrome_os = optional(bool) }))) })) ip_subnetworks = optional(list(string), []) members = optional(list(string), []) negate = optional(bool) regions = optional(list(string), []) required_access_levels = optional(list(string), []) })), []) description = optional(string) }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [vpc_sc_egress_policies](variables.tf#L151) | VPC SC egress policy defnitions. | <code title="map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") identities = optional(list(string)) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) resource_type_external = optional(bool, false) }) }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [vpc_sc_ingress_policies](variables.tf#L171) | VPC SC ingress policy defnitions. | <code title="map(object({ from = object({ access_levels = optional(list(string), []) identity_type = optional(string) identities = optional(list(string)) resources = optional(list(string), []) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) }) }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [vpc_sc_egress_policies](variables.tf#L151) | VPC SC egress policy definitions. | <code title="map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") identities = optional(list(string)) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) resource_type_external = optional(bool, false) }) }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [vpc_sc_ingress_policies](variables.tf#L171) | VPC SC ingress policy definitions. | <code title="map(object({ from = object({ access_levels = optional(list(string), []) identity_type = optional(string) identities = optional(list(string)) resources = optional(list(string), []) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) }) }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [vpc_sc_perimeters](variables.tf#L192) | VPC SC regular perimeter definitions. | <code title="object({ dev = optional(object({ access_levels = optional(list(string), []) egress_policies = optional(list(string), []) ingress_policies = optional(list(string), []) resources = optional(list(string), []) }), {}) landing = optional(object({ access_levels = optional(list(string), []) egress_policies = optional(list(string), []) ingress_policies = optional(list(string), []) resources = optional(list(string), []) }), {}) prod = optional(object({ access_levels = optional(list(string), []) egress_policies = optional(list(string), []) ingress_policies = optional(list(string), []) resources = optional(list(string), []) }), {}) })">object({…})</code> | | <code>{}</code> | |
|
||||
|
||||
## Outputs
|
||||
|
|
|
@ -149,7 +149,7 @@ variable "vpc_sc_access_levels" {
|
|||
}
|
||||
|
||||
variable "vpc_sc_egress_policies" {
|
||||
description = "VPC SC egress policy defnitions."
|
||||
description = "VPC SC egress policy definitions."
|
||||
type = map(object({
|
||||
from = object({
|
||||
identity_type = optional(string, "ANY_IDENTITY")
|
||||
|
@ -169,7 +169,7 @@ variable "vpc_sc_egress_policies" {
|
|||
}
|
||||
|
||||
variable "vpc_sc_ingress_policies" {
|
||||
description = "VPC SC ingress policy defnitions."
|
||||
description = "VPC SC ingress policy definitions."
|
||||
type = map(object({
|
||||
from = object({
|
||||
access_levels = optional(list(string), [])
|
||||
|
|
|
@ -62,15 +62,15 @@ The default configuration will implement 3 tags:
|
|||
|
||||
Anything that is not tagged is available to all users who have access to the data warehouse.
|
||||
|
||||
You can configure your tags and roles associated by configuring the `data_catalog_tags` variable. We suggest useing the "[Best practices for using policy tags in BigQuery](https://cloud.google.com/bigquery/docs/best-practices-policy-tags)" article as a guide to designing your tags structure and access pattern. By default, no groups has access to tagged data.
|
||||
You can configure your tags and roles associated by configuring the `data_catalog_tags` variable. We suggest using the "[Best practices for using policy tags in BigQuery](https://cloud.google.com/bigquery/docs/best-practices-policy-tags)" article as a guide to designing your tags structure and access pattern. By default, no groups have access to tagged data.
|
||||
|
||||
### VPC-SC
|
||||
|
||||
As is often the case in real-world configurations, [VPC-SC](https://cloud.google.com/vpc-service-controls) is needed to mitigate data exfiltration. VPC-SC can be configured from the [FAST security stage](../../2-security). This step is optional, but highly recomended, and depends on customer policies and security best practices.
|
||||
As is often the case in real-world configurations, [VPC-SC](https://cloud.google.com/vpc-service-controls) is needed to mitigate data exfiltration. VPC-SC can be configured from the [FAST security stage](../../2-security). This step is optional, but highly recommended, and depends on customer policies and security best practices.
|
||||
|
||||
To configure the use of VPC-SC on the data platform, you have to specify the data platform project numbers on the `vpc_sc_perimeter_projects.dev` variable on [FAST security stage](../../2-security#perimeter-resources).
|
||||
|
||||
In the case your Data Warehouse need to handle confidential data and you have the requirement to separate them deeply from other data and IAM is not enough, the suggested configuration is to keep the confidential project in a separate VPC-SC perimeter with the adequate ingress/egress rules needed for the load and tranformation service account. Below you can find an high level diagram describing the configuration.
|
||||
In case your Data Warehouse needs to handle confidential data and you have the requirement to separate them deeply from other data and IAM is not enough, the suggested configuration is to keep the confidential project in a separate VPC-SC perimeter with the adequate ingress/egress rules needed for the load and transformation service account. Below you can find a high-level diagram describing the configuration.
|
||||
|
||||
<p align="center">
|
||||
<img src="diagram_vpcsc.png" alt="Data Platform VPC-SC diagram">
|
||||
|
|
|
@ -131,7 +131,7 @@ This stage is designed with multi-tenancy in mind, and the expectation is that
|
|||
|
||||
- the `cluster_default` variable allows defining common defaults for all clusters
|
||||
- the `clusters` variable is used to declare the actual GKE clusters and allows overriding defaults on a per-cluster basis
|
||||
- the `nodepool_defaults` variable allows definining common defaults for all node pools
|
||||
- the `nodepool_defaults` variable allows defining common defaults for all node pools
|
||||
- the `nodepools` variable is used to declare cluster node pools and allows overriding defaults on a per-cluster basis
|
||||
|
||||
There are two additional variables that influence cluster configuration: `authenticator_security_group` to configure [Google Groups for RBAC](https://cloud.google.com/kubernetes-engine/docs/how-to/google-groups-rbac), `dns_domain` to configure [Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns).
|
||||
|
@ -171,7 +171,7 @@ Leave all these variables unset (or set to `null`) to disable fleet management.
|
|||
| [clusters](variables.tf#L42) | Clusters configuration. Refer to the gke-cluster module for type details. | <code title="map(object({ cluster_autoscaling = optional(any) description = optional(string) enable_addons = optional(any, { horizontal_pod_autoscaling = true, http_load_balancing = true }) enable_features = optional(any, { workload_identity = true }) issue_client_certificate = optional(bool, false) labels = optional(map(string)) location = string logging_config = optional(list(string), ["SYSTEM_COMPONENTS"]) maintenance_config = optional(any, { daily_window_start_time = "03:00" recurring_window = null maintenance_exclusion = [] }) max_pods_per_node = optional(number, 110) min_master_version = optional(string) monitoring_config = optional(object({ enable_components = optional(list(string), ["SYSTEM_COMPONENTS"]) managed_prometheus = optional(bool) })) node_locations = optional(list(string)) private_cluster_config = optional(any) release_channel = optional(string) vpc_config = object({ subnetwork = string network = optional(string) secondary_range_blocks = optional(object({ pods = string services = string })) secondary_range_names = optional(object({ pods = string services = string }), { pods = "pods", services = "services" }) master_authorized_ranges = optional(map(string)) master_ipv4_cidr_block = optional(string) }) }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [fleet_configmanagement_clusters](variables.tf#L90) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | <code>map(list(string))</code> | | <code>{}</code> | |
|
||||
| [fleet_configmanagement_templates](variables.tf#L98) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | <code title="map(object({ binauthz = bool config_sync = object({ git = object({ gcp_service_account_email = string https_proxy = string policy_dir = string secret_type = string sync_branch = string sync_repo = string sync_rev = string sync_wait_secs = number }) prevent_drift = string source_format = string }) hierarchy_controller = object({ enable_hierarchical_resource_quota = bool enable_pod_tree_labels = bool }) policy_controller = object({ audit_interval_seconds = number exemptable_namespaces = list(string) log_denies_enabled = bool referential_rules_enabled = bool template_library_installed = bool }) version = string }))">map(object({…}))</code> | | <code>{}</code> | |
|
||||
| [fleet_features](variables.tf#L133) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | <code title="object({ appdevexperience = bool configmanagement = bool identityservice = bool multiclusteringress = string multiclusterservicediscovery = bool servicemesh = bool })">object({…})</code> | | <code>null</code> | |
|
||||
| [fleet_features](variables.tf#L133) | Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | <code title="object({ appdevexperience = bool configmanagement = bool identityservice = bool multiclusteringress = string multiclusterservicediscovery = bool servicemesh = bool })">object({…})</code> | | <code>null</code> | |
|
||||
| [fleet_workload_identity](variables.tf#L146) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | <code>bool</code> | | <code>false</code> | |
|
||||
| [group_iam](variables.tf#L161) | Project-level authoritative IAM bindings for groups in {GROUP_EMAIL => [ROLES]} format. Use group emails as keys, list of roles as values. | <code>map(list(string))</code> | | <code>{}</code> | |
|
||||
| [iam](variables.tf#L176) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | <code>map(list(string))</code> | | <code>{}</code> | |
|
||||
|
|
|
@ -131,7 +131,7 @@ variable "fleet_configmanagement_templates" {
|
|||
}
|
||||
|
||||
variable "fleet_features" {
|
||||
description = "Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used."
|
||||
description = "Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used."
|
||||
type = object({
|
||||
appdevexperience = bool
|
||||
configmanagement = bool
|
||||
|
|
|
@ -4,7 +4,7 @@ This module simplifies the creation of a Binary Authorization policy, attestors
|
|||
|
||||
## Example
|
||||
|
||||
### Binary Athorization
|
||||
### Binary Authorization
|
||||
|
||||
```hcl
|
||||
module "binauthz" {
|
||||
|
|
|
@ -5,7 +5,7 @@ This set of modules creates specialized [cloud-config](https://cloud.google.com/
|
|||
These modules are designed for several use cases:
|
||||
|
||||
- to quickly prototype specialized services (eg MySQL access or HTTP serving) for prototyping infrastructure
|
||||
- to emulate production services for perfomance testing
|
||||
- to emulate production services for performance testing
|
||||
- to easily add glue components for services like DNS (eg to work around inbound/outbound forwarding limitations)
|
||||
- to implement cloud-native production deployments that leverage cloud-init for configuration management, without the need of a separate tool
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ _stop_ipsec() {
|
|||
}
|
||||
trap _stop_ipsec TERM
|
||||
|
||||
# Making the containter to work as a default gateway for LAN_NETWORKS
|
||||
# Making the container to work as a default gateway for LAN_NETWORKS
|
||||
iptables -t nat -A POSTROUTING -s ${LAN_NETWORKS} -o ${VPN_DEVICE} -m policy --dir out --pol ipsec -j ACCEPT
|
||||
iptables -t nat -A POSTROUTING -s ${LAN_NETWORKS} -o ${VPN_DEVICE} -j MASQUERADE
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ case "${PLUTO_VERB}" in
|
|||
# Enable loose source validation, if possible. Otherwise disable validation.
|
||||
sudo /sbin/sysctl -w net.ipv4.conf.${VTI_IF}.rp_filter=2 || sysctl -w net.ipv4.conf.${VTI_IF}.rp_filter=0
|
||||
|
||||
# If you would like to use VTI for policy-based you shoud take care of routing by yourselv, e.x.
|
||||
# If you would like to use VTI for policy-based you should take care of routing yourself, e.g.
|
||||
if [[ "${PLUTO_PEER_CLIENT}" != "0.0.0.0/0" ]]; then
|
||||
${IP} r add "${PLUTO_PEER_CLIENT}" dev "${VTI_IF}"
|
||||
fi
|
||||
|
|
|
@ -33,7 +33,7 @@ http_access deny !safe_ports
|
|||
# deny CONNECT if connection is not using ssl
|
||||
http_access deny CONNECT !ssl_ports
|
||||
|
||||
# deny acccess to cachemgr
|
||||
# deny access to cachemgr
|
||||
http_access deny manager
|
||||
|
||||
# deny access to localhost through the proxy
|
||||
|
|
|
@ -101,7 +101,7 @@ module "cloud_run" {
|
|||
|
||||
### VPC Access Connector creation
|
||||
|
||||
If creation of a [VPC Access Connector](https://cloud.google.com/vpc/docs/serverless-vpc-access) is required, use the `vpc_connector_create` variable which also support optional attribtues for number of instances, machine type, and throughput (not shown here). The annotation to use the connector will be added automatically.
|
||||
If creation of a [VPC Access Connector](https://cloud.google.com/vpc/docs/serverless-vpc-access) is required, use the `vpc_connector_create` variable which also supports optional attributes for number of instances, machine type, and throughput (not shown here). The annotation to use the connector will be added automatically.
|
||||
|
||||
```hcl
|
||||
module "cloud_run" {
|
||||
|
|
|
@ -49,7 +49,7 @@ module "simple-vm-example" {
|
|||
|
||||
VM service accounts can be managed in three different ways:
|
||||
|
||||
- You can let the module create a service account for you by settting `service_account_create = true`
|
||||
- You can let the module create a service account for you by setting `service_account_create = true`
|
||||
- You can use an existing service account by setting `service_account_create = false` (the default value) and passing the full email address of the service account to the `service_account` variable. This is useful, for example, if you want to reuse the service account from another previously created instance, or if you want to create the service account manually with the `iam-service-account` module. In this case, you probably also want to set `service_account_scopes` to `cloud-platform`.
|
||||
- Lastly, you can use the default compute service account by setting `service_account_create = false`. Please note that using the default compute service account is not recommended.
|
||||
|
||||
|
@ -204,7 +204,7 @@ module "vm-disk-options-example" {
|
|||
|
||||
#### Internal and external IPs
|
||||
|
||||
By default VNs are create with an automatically assigned IP addresses, but you can change it through the `addreses` and `nat` attributes of the `network_interfaces` variable:
|
||||
By default VMs are created with automatically assigned IP addresses, but you can change it through the `addresses` and `nat` attributes of the `network_interfaces` variable:
|
||||
|
||||
```hcl
|
||||
module "vm-internal-ip" {
|
||||
|
|
|
@ -40,7 +40,7 @@ host: "echo-api.endpoints.YOUR-PROJECT-ID.cloud.goog"
|
|||
|---|---|:---:|:---:|:---:|
|
||||
| [openapi_config](variables.tf#L32) | The configuration for an OpenAPI endpoint. Either this or grpc_config must be specified. | <code title="object({ yaml_path = string })">object({…})</code> | ✓ | |
|
||||
| [service_name](variables.tf#L45) | The name of the service. Usually of the form '$apiname.endpoints.$projectid.cloud.goog'. | <code>string</code> | ✓ | |
|
||||
| [grpc_config](variables.tf#L17) | The configuration for a gRPC enpoint. Either this or openapi_config must be specified. | <code title="object({ yaml_path = string protoc_output_path = string })">object({…})</code> | | <code>null</code> |
|
||||
| [grpc_config](variables.tf#L17) | The configuration for a gRPC endpoint. Either this or openapi_config must be specified. | <code title="object({ yaml_path = string protoc_output_path = string })">object({…})</code> | | <code>null</code> |
|
||||
| [iam](variables.tf#L26) | IAM bindings for topic in {ROLE => [MEMBERS]} format. | <code>map(list(string))</code> | | <code>{}</code> |
|
||||
| [project_id](variables.tf#L39) | The project ID that the service belongs to. | <code>string</code> | | <code>null</code> |
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
*/
|
||||
|
||||
variable "grpc_config" {
|
||||
description = "The configuration for a gRPC enpoint. Either this or openapi_config must be specified."
|
||||
description = "The configuration for a gRPC endpoint. Either this or openapi_config must be specified."
|
||||
type = object({
|
||||
yaml_path = string
|
||||
protoc_output_path = string
|
||||
|
|
|
@ -266,7 +266,7 @@ resource "google_container_cluster" "cluster" {
|
|||
}
|
||||
}
|
||||
|
||||
# dataplane v2 has bult-in network policies
|
||||
# dataplane v2 has built-in network policies
|
||||
dynamic "network_policy" {
|
||||
for_each = (
|
||||
var.enable_addons.network_policy && !var.enable_features.dataplane_v2
|
||||
|
|
|
@ -326,7 +326,7 @@ module "hub" {
|
|||
| [clusters](variables.tf#L17) | Clusters members of this GKE Hub in name => id format. | <code>map(string)</code> | | <code>{}</code> |
|
||||
| [configmanagement_clusters](variables.tf#L24) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | <code>map(list(string))</code> | | <code>{}</code> |
|
||||
| [configmanagement_templates](variables.tf#L31) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | <code title="map(object({ binauthz = bool config_sync = object({ git = object({ gcp_service_account_email = string https_proxy = string policy_dir = string secret_type = string sync_branch = string sync_repo = string sync_rev = string sync_wait_secs = number }) prevent_drift = string source_format = string }) hierarchy_controller = object({ enable_hierarchical_resource_quota = bool enable_pod_tree_labels = bool }) policy_controller = object({ audit_interval_seconds = number exemptable_namespaces = list(string) log_denies_enabled = bool referential_rules_enabled = bool template_library_installed = bool }) version = string }))">map(object({…}))</code> | | <code>{}</code> |
|
||||
| [features](variables.tf#L66) | Enable and configue fleet features. | <code title="object({ appdevexperience = optional(bool, false) configmanagement = optional(bool, false) identityservice = optional(bool, false) multiclusteringress = optional(string, null) multiclusterservicediscovery = optional(bool, false) servicemesh = optional(bool, false) })">object({…})</code> | | <code title="{ appdevexperience = false configmanagement = false identityservice = false multiclusteringress = null servicemesh = false multiclusterservicediscovery = false }">{…}</code> |
|
||||
| [features](variables.tf#L66) | Enable and configure fleet features. | <code title="object({ appdevexperience = optional(bool, false) configmanagement = optional(bool, false) identityservice = optional(bool, false) multiclusteringress = optional(string, null) multiclusterservicediscovery = optional(bool, false) servicemesh = optional(bool, false) })">object({…})</code> | | <code title="{ appdevexperience = false configmanagement = false identityservice = false multiclusteringress = null servicemesh = false multiclusterservicediscovery = false }">{…}</code> |
|
||||
| [workload_identity_clusters](variables.tf#L92) | Clusters that will use Fleet Workload Identity. | <code>list(string)</code> | | <code>[]</code> |
|
||||
|
||||
## Outputs
|
||||
|
|
|
@ -64,7 +64,7 @@ variable "configmanagement_templates" {
|
|||
}
|
||||
|
||||
variable "features" {
|
||||
description = "Enable and configue fleet features."
|
||||
description = "Enable and configure fleet features."
|
||||
type = object({
|
||||
appdevexperience = optional(bool, false)
|
||||
configmanagement = optional(bool, false)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
This module allows managing Global HTTP/HTTPS Classic Load Balancers (GLBs). It's designed to expose the full configuration of the underlying resources, and to facilitate common usage patterns by providing sensible defaults, and optionally managing prerequisite resources like health checks, instance groups, etc.
|
||||
|
||||
Due to the complexity of the underlying resources, changes to the configuration that involve recreation of resources are best applied in stages, starting by disabling the configuration in the urlmap that references the resources that neeed recreation, then doing the same for the backend service, etc.
|
||||
Due to the complexity of the underlying resources, changes to the configuration that involve recreation of resources are best applied in stages, starting by disabling the configuration in the urlmap that references the resources that need recreation, then doing the same for the backend service, etc.
|
||||
|
||||
## Examples
|
||||
|
||||
|
|
|
@ -82,7 +82,7 @@ output "filtered-projects" {
|
|||
| [parent](variables.tf#L55) | Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format. | <code>string</code> | ✓ | |
|
||||
| [ignore_folders](variables.tf#L17) | A list of folder IDs or numbers to be excluded from the output, all the subfolders and projects are excluded from the output regardless of the include_projects variable. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [ignore_projects](variables.tf#L28) | A list of project IDs, numbers or prefixes to exclude matching projects from the module output. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [include_projects](variables.tf#L41) | A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wilcard entries. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [include_projects](variables.tf#L41) | A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wildcard entries. | <code>list(string)</code> | | <code>[]</code> |
|
||||
| [query](variables.tf#L64) | A string query as defined in the [Query Syntax](https://cloud.google.com/asset-inventory/docs/query-syntax). | <code>string</code> | | <code>"state:ACTIVE"</code> |
|
||||
|
||||
## Outputs
|
||||
|
|
|
@ -39,7 +39,7 @@ variable "ignore_projects" {
|
|||
}
|
||||
|
||||
variable "include_projects" {
|
||||
description = "A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wilcard entries."
|
||||
description = "A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wildcard entries."
|
||||
type = list(string)
|
||||
default = []
|
||||
# example excluding all the projects starting with "prf-" except "prd-123457"
|
||||
|
|
|
@ -17,7 +17,7 @@ import yaml
|
|||
|
||||
|
||||
def test_defaults(plan_summary):
|
||||
"Test defalt configuration."
|
||||
"Test default configuration."
|
||||
# _, output = apply_runner(mysql_password='foo')
|
||||
summary = plan_summary('modules/cloud-config-container/mysql/',
|
||||
mysql_password='foo')
|
||||
|
|
|
@ -75,10 +75,10 @@ def get_bindings(resources, prefix=None, folders=None):
|
|||
# Handle Cloud Services Service Account
|
||||
if member_domain == 'cloudservices.gserviceaccount.com':
|
||||
member_id = "PROJECT_CLOUD_SERVICES"
|
||||
# Handle Cloud Service Identity Service Acocunt
|
||||
# Handle Cloud Service Identity Service Account
|
||||
if re.match("^service-\d{8}", member_id):
|
||||
member_id = "SERVICE_IDENTITY_" + member_domain.split(".", 1)[0]
|
||||
# Handle BQ Cloud Service Identity Service Acocunt
|
||||
# Handle BQ Cloud Service Identity Service Account
|
||||
if re.match("^bq-\d{8}", member_id):
|
||||
member_id = "IDENTITY_" + member_domain.split(".", 1)[0]
|
||||
resource_type_output = "Service Identity - " + resource_type
|
||||
|
|
Loading…
Reference in New Issue