Compare commits
272 Commits
5edc931bf9
...
d245088fa1
Author | SHA1 | Date |
---|---|---|
dependabot[bot] | d245088fa1 | |
Julio Castillo | aa850ead5f | |
Julio Castillo | fddf8b52bc | |
Julio Castillo | 3e67fc00ca | |
Ana Fernandez | 00efd6099f | |
Ana Fernandez del Alamo | 4129eb11ae | |
Ana Fernandez | 8254303dc3 | |
Ana Fernandez del Alamo | a5bbd09776 | |
Ludovico Magnocavallo | e0911c6291 | |
Roberto Jung Drebes | d2f0b17ec4 | |
Bharath KKB | 47ae6dc7c3 | |
lcaggio | 40656a23de | |
Alejandro Leal | 6a89d71e96 | |
Julio Castillo | 56132ffb03 | |
Alejandro Leal | 31c1c8534a | |
Alejandro Leal | a0beec747e | |
Julio Castillo | 5bd3aa4d5f | |
Alejandro Leal | 11f7f3efed | |
Alejandro Leal | 65e6ef98fa | |
Alejandro Leal | a2a2942e2c | |
Alejandro Leal | 0ad21351c0 | |
Natalia Strelkova | 85407109c8 | |
Natalia Strelkova | c4ec4868c2 | |
Natalia Strelkova | f5b10fa3da | |
Alex Ostapenko | 7861ea74b8 | |
Julio Castillo | 78ed6a8af6 | |
Ludovico Magnocavallo | 2cbd85c8e0 | |
Manuel Aller | dd1e5dc463 | |
Alejandro Leal | 87cd83f5c0 | |
apichick | 60d579be4d | |
Julio Castillo | 8e1e761577 | |
Miren Esnaola | 4df6c90d12 | |
Julio Castillo | d20a078134 | |
Julio Castillo | b153474468 | |
Jay Schwerberg | 20b8002602 | |
Julio Castillo | 7d9f52298c | |
Julio Castillo | b8b6f0dcc2 | |
Fawzi | ac349332c4 | |
lcaggio | 3cc6c71e96 | |
Julio Diez | f0d928f08d | |
Julio Diez | 0e9b685d9d | |
Julio Diez | 404920658c | |
Julio Diez | 5454817919 | |
Julio Diez | b9e7bf7aaa | |
Julio Diez | eadc1c22a4 | |
Julio Diez | bf182bf600 | |
Julio Diez | 8c7b6b2410 | |
Julio Diez | 76ce3e8b3e | |
Jack P | 491b52f023 | |
Julio Diez | 252be12bd4 | |
Julio Diez | 9eea6e3bbc | |
Julio Diez | aec85a20c9 | |
Julio Castillo | bf9e8eeb5f | |
Julio Castillo | 87281f2017 | |
Julio Castillo | 176209ccda | |
Julio Castillo | 72b5944fb3 | |
Julio Castillo | 873c45d6c6 | |
Julio Castillo | 05ed1bf12f | |
Ludo | 94d3301834 | |
Prabha Arya | c344606ae5 | |
lcaggio | 4d66623de1 | |
Toby Archer | e333b6ffa2 | |
Prabha Arya | 23299f7bd7 | |
Ludo | 47bd0987fa | |
Ludovico Magnocavallo | efb0ebe689 | |
apichick | 234aa4c55d | |
Miren Esnaola | 75e0a092aa | |
Ludo | 933dc202b7 | |
Ludovico Magnocavallo | 75cc2f3d7a | |
Ludo | 7b3f209fbd | |
Brent Walker | 5763eb53d4 | |
Ludovico Magnocavallo | 98c8643886 | |
Bob Idle | 94589efc34 | |
Bharath KKB | 1547a55e1f | |
Julio Castillo | 99c8846c94 | |
Julio Castillo | 6079bf6dea | |
Julio Castillo | f5b5bf9327 | |
Julio Castillo | 6f06ca5781 | |
Julio Castillo | 127787c65e | |
Miren Esnaola | 2637d1e9aa | |
Julio Castillo | 6fa3df882e | |
Julio Castillo | 1e0b1da0d2 | |
Julio Castillo | eedd805bd6 | |
Julio Castillo | f4b8a61981 | |
Julio Castillo | 4245faae73 | |
Julio Castillo | ee4a908b10 | |
Julio Castillo | 78867b0f99 | |
Julio Castillo | a09959539c | |
Julio Castillo | cc73c30c08 | |
Julio Castillo | 3cd3106695 | |
Julio Castillo | 7dd30f104f | |
Julio Castillo | 0bc89d9942 | |
Julio Castillo | b8d601bd63 | |
Julio Castillo | 175d1d3893 | |
Julio Castillo | 49bb72d461 | |
Julio Castillo | 89fe36b4f4 | |
Julio Castillo | 1b4e8eb305 | |
Julio Castillo | 20ccf7416f | |
Julio Castillo | b097d297ff | |
Julio Castillo | 84a7b988a3 | |
Julio Castillo | bca8a33f1e | |
Julio Castillo | 8e55374717 | |
Julio Castillo | 2d76f80967 | |
Julio Castillo | e61df0aa3b | |
Julio Castillo | 016a4e08ae | |
Julio Castillo | df7cf3d278 | |
Julio Castillo | cb13d481d6 | |
Julio Castillo | eab9053e16 | |
Julio Castillo | 1649787905 | |
Julio Castillo | 1c7f8f5907 | |
Julio Castillo | 230a1034ec | |
Julio Castillo | 924230eb12 | |
Julio Castillo | 4abe5e2baf | |
Julio Castillo | c819305a42 | |
Julio Castillo | 21d25c999f | |
Julio Castillo | d6ee1b6551 | |
Avinash Jha | e881537f87 | |
Miren Esnaola | 93ceab5be9 | |
lcaggio | 8488e866bc | |
lcaggio | b896ccb9e5 | |
lcaggio | 9fdf80738f | |
lcaggio | 30bef8546f | |
lcaggio | 15a4dcdd71 | |
lcaggio | f5c5ac0606 | |
lcaggio | f4490fcaea | |
Julio Castillo | df8c61fe69 | |
bjohnrl | e24b993550 | |
lcaggio | 1f4fac2f1d | |
lcaggio | edf67fc5d0 | |
bjohnrl | 5807c03d88 | |
Julio Castillo | 66f00903c6 | |
jamesmao-xyz | e1ebfeecad | |
Ludovico Magnocavallo | 91a1168d1d | |
Ludo | b936a7cc8f | |
Julio Castillo | 7b37bd1d57 | |
Julio Castillo | 4b2e2d2ef7 | |
lcaggio | 344f74d11b | |
lcaggio | 306b38295e | |
Julio Diez | 087b4c40b4 | |
Julio Diez | d9b9f4d347 | |
Julio Diez | dc608e24c4 | |
Julio Diez | 102c8ed0fb | |
Ludovico Magnocavallo | 121bc30e90 | |
Julio Castillo | c28f7e69e9 | |
Julio Castillo | e8ff5080b9 | |
Julio Castillo | 4770cb06fd | |
Julio Castillo | 461f7780c2 | |
Julio Castillo | 5f82938739 | |
Julio Castillo | 771dd02b18 | |
Julio Castillo | 0150e97e51 | |
Julio Castillo | 3dd27b6248 | |
Julio Castillo | 01b9921f5c | |
Julio Castillo | 1927b138d6 | |
Julio Castillo | a2bffc7302 | |
Julio Castillo | ed49f3db39 | |
Julio Castillo | f5e1717319 | |
Ludovico Magnocavallo | 9072c3472e | |
Julio Castillo | 09945a24eb | |
Julio Castillo | 2d4bd5a244 | |
Julio Castillo | ac59bdeb75 | |
Julio Castillo | 5e9b7091c2 | |
Julio Castillo | ff80e66a38 | |
Bogdan A | 4443fe90f5 | |
bgdanix | c6e79ddfc4 | |
Julio Castillo | 23302b1efd | |
Julio Castillo | eca0a9583e | |
Antonio Lopez | a50473866f | |
Julio Castillo | 1cf6ade31b | |
apichick | a59907155d | |
ajlopezn | 7929f4186c | |
Antonio Lopez | f0bdc0d862 | |
ajlopezn | 063c5061c8 | |
apichick | 192ebffb98 | |
Dazbo | 56261101c3 | |
ajlopezn | df789db9bd | |
Antonio Lopez | b4156eff41 | |
Miren Esnaola | 8b4f29ac24 | |
Julio Castillo | f41362eb8f | |
Fawzi | 3673dc2774 | |
Fawzi | 2ff0b47218 | |
Fawzi | ebf93a66ad | |
Julio Castillo | e20c092d70 | |
Fawzi | 4722efea40 | |
Fawzi | 5f450300d0 | |
Fawzi | 3c4254fc1f | |
ajlopezn | 6db1a5f5d3 | |
ajlopezn | d150f03b41 | |
ajlopezn | 4e426a990a | |
ajlopezn | 6f1e531af0 | |
ajlopezn | b809b315f6 | |
Jack Powell | 024801dc74 | |
Antonio Lopez | 660da5b585 | |
ajlopezn | 4ccf9f9361 | |
Ludovico Magnocavallo | 2cd247bb1f | |
Mikhail Filipchuk | 33e8b42842 | |
Dazbo | 4843d0dfaf | |
ajlopezn | adb88d8a87 | |
ajlopezn | 18f1e8ebb1 | |
Fawzi | db3c48736c | |
Julio Castillo | b1921c9a21 | |
derailed-dash | 6917343a33 | |
lcaggio | c8d5b85ac9 | |
lcaggio | 1bd5c66b42 | |
lcaggio | 83e5ef180c | |
lcaggio | badec52d11 | |
lcaggio | bb2abb8822 | |
Ludovico Magnocavallo | 865ef463bb | |
Ludovico Magnocavallo | 40cda39093 | |
Luca Prete | a9cba47ce8 | |
Luca Prete | 70aefc2ddb | |
Luca Prete | 07f60fb0b9 | |
Ludovico Magnocavallo | bc2d9372aa | |
Luca Prete | bf14d2da23 | |
Julio Castillo | 2e336bcd50 | |
Julio Castillo | e0c15d70e4 | |
Julio Castillo | 25b64c2b80 | |
Julio Castillo | 3d24f0999c | |
Julio Castillo | cef5128f07 | |
Julio Castillo | 1dce9d0aff | |
Julio Castillo | 538833b0d4 | |
Ludo | 90c4602e80 | |
Ludo | 4738471d96 | |
Ludovico Magnocavallo | 92b71a5098 | |
Valerio Ponza | c5db50d1d7 | |
Valerio Ponza | 6204787226 | |
Valerio Ponza | 0a20bc4f2d | |
Valerio Ponza | 1fb7e114fd | |
Valerio Ponza | 596acb63e4 | |
Valerio Ponza | 44d2e9e50e | |
Valerio Ponza | 935e95de2a | |
Valerio Ponza | 64a9952656 | |
Julio Castillo | 2d308c636d | |
Julio Castillo | 7135c9015f | |
Julio Castillo | b503bde544 | |
Simone Ruffilli | e2b0ef55ab | |
Wiktor Niesiobędzki | c76e95e7e8 | |
simonebruzzechesse | 94bf7bd63f | |
Geoff Cardamone | 11b4fee5b5 | |
Julio Castillo | 04c81ad511 | |
Julio Castillo | d64b5779e4 | |
apichick | a6b62084d2 | |
apichick | e5cfa4fafe | |
simonebruzzechesse | b6880104d9 | |
Julio Castillo | 039d10b287 | |
Luca Prete | dfa1fce274 | |
bruzzechesse | d68476ba07 | |
bruzzechesse | cf2fd119b5 | |
Miren Esnaola | 8cbaa932ed | |
Wiktor Niesiobędzki | 9005a51a95 | |
Wiktor Niesiobędzki | 9a0137bcfc | |
Wiktor Niesiobędzki | d105ed59d3 | |
Wiktor Niesiobędzki | 082301c09c | |
bruzzechesse | a93b80cf95 | |
bruzzechesse | 41570d2840 | |
bruzzechesse | 021fb84765 | |
Wiktor Niesiobędzki | 58bc576ddb | |
Ludovico Magnocavallo | 987ea34d93 | |
Ludovico Magnocavallo | 28da881c3f | |
Ludovico Magnocavallo | 603d9045eb | |
Julio Castillo | 1af1c77ae8 | |
Julio Castillo | eb2bee50cd | |
Ludovico Magnocavallo | 3d41d01efc | |
Julio Castillo | 176c5e05cd | |
Ludovico Magnocavallo | 11bc9a80d1 | |
simonebruzzechesse | c4c4688adc | |
apichick | a4bc9c613a | |
Miren Esnaola | faf11c8ac7 | |
Ludovico Magnocavallo | 60261a6352 | |
Julio Castillo | fc000530c8 | |
Julio Castillo | 3d425b7d9c | |
Luca Prete | 3b20d617dc | |
Luca Prete | 6e49e94749 |
|
@ -41,11 +41,21 @@ runs:
|
||||||
- name: Configure provider cache
|
- name: Configure provider cache
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo 'plugin_cache_dir = "/home/runner/.terraform.d/plugin-cache"' \
|
echo 'plugin_cache_dir = "${{ env.TF_PLUGIN_CACHE_DIR }}"' \
|
||||||
| tee -a /home/runner/.terraformrc
|
| tee -a /home/runner/.terraformrc
|
||||||
echo 'disable_checkpoint = true' \
|
echo 'disable_checkpoint = true' \
|
||||||
| tee -a /home/runner/.terraformrc
|
| tee -a /home/runner/.terraformrc
|
||||||
mkdir -p ${{ env.TF_PLUGIN_CACHE_DIR }}
|
mkdir -p ${{ env.TF_PLUGIN_CACHE_DIR }}
|
||||||
|
- name: Download lockfile
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
name: lockfile
|
||||||
|
path: tools/lockfile
|
||||||
|
- name: Download Terraform provider cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.TF_PLUGIN_CACHE_DIR }}
|
||||||
|
key: ${{ runner.os }}-terraform-${{ hashFiles('tools/lockfile/.terraform.lock.hcl') }}
|
||||||
# avoid conflicts with user-installed providers on local machines
|
# avoid conflicts with user-installed providers on local machines
|
||||||
- name: Pin provider versions
|
- name: Pin provider versions
|
||||||
shell: bash
|
shell: bash
|
||||||
|
|
|
@ -37,7 +37,7 @@ jobs:
|
||||||
- name: Set up Terraform
|
- name: Set up Terraform
|
||||||
uses: hashicorp/setup-terraform@v2
|
uses: hashicorp/setup-terraform@v2
|
||||||
with:
|
with:
|
||||||
terraform_version: 1.3.2
|
terraform_version: 1.4.4
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
|
@ -56,7 +56,7 @@ jobs:
|
||||||
- name: Check documentation
|
- name: Check documentation
|
||||||
id: documentation-fabric
|
id: documentation-fabric
|
||||||
run: |
|
run: |
|
||||||
python3 tools/check_documentation.py modules fast blueprints
|
python3 tools/check_documentation.py --show-diffs modules fast blueprints
|
||||||
|
|
||||||
- name: Check documentation links
|
- name: Check documentation links
|
||||||
id: documentation-links-fabric
|
id: documentation-links-fabric
|
||||||
|
@ -75,3 +75,8 @@ jobs:
|
||||||
tools/*.py \
|
tools/*.py \
|
||||||
blueprints/cloud-operations/network-dashboard/src/*py \
|
blueprints/cloud-operations/network-dashboard/src/*py \
|
||||||
blueprints/cloud-operations/network-dashboard/src/plugins/*py
|
blueprints/cloud-operations/network-dashboard/src/plugins/*py
|
||||||
|
|
||||||
|
- name: Check blueprint metadata
|
||||||
|
id: metadata
|
||||||
|
run: |
|
||||||
|
python tools/validate_metadata.py -v blueprints
|
||||||
|
|
|
@ -28,11 +28,47 @@ env:
|
||||||
PYTEST_ADDOPTS: "--color=yes"
|
PYTEST_ADDOPTS: "--color=yes"
|
||||||
PYTHON_VERSION: "3.10"
|
PYTHON_VERSION: "3.10"
|
||||||
TF_PLUGIN_CACHE_DIR: "/home/runner/.terraform.d/plugin-cache"
|
TF_PLUGIN_CACHE_DIR: "/home/runner/.terraform.d/plugin-cache"
|
||||||
TF_VERSION: 1.3.9
|
TF_VERSION: 1.4.4
|
||||||
|
TFTEST_COPY: 1
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
setup-tf-providers:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- uses: hashicorp/setup-terraform@v2
|
||||||
|
with:
|
||||||
|
terraform_version: ${{ env.TERRAFORM_VERSION }}
|
||||||
|
terraform_wrapper: false
|
||||||
|
|
||||||
|
- name: Build lockfile and fetch providers
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
mkdir -p ${{ env.TF_PLUGIN_CACHE_DIR }}
|
||||||
|
echo 'plugin_cache_dir = "${{ env.TF_PLUGIN_CACHE_DIR }}"' | tee -a /home/runner/.terraformrc
|
||||||
|
echo 'disable_checkpoint = true' | tee -a /home/runner/.terraformrc
|
||||||
|
cp default-versions.tf tools/lockfile
|
||||||
|
sed -i 's/>=\(.*# tftest\)/=\1/g' tools/lockfile/default-versions.tf
|
||||||
|
cd tools/lockfile
|
||||||
|
terraform init -upgrade=true
|
||||||
|
|
||||||
|
- name: Upload Terraform provider cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.TF_PLUGIN_CACHE_DIR }}
|
||||||
|
key: ${{ runner.os }}-terraform-${{ hashFiles('tools/lockfile/.terraform.lock.hcl') }}
|
||||||
|
|
||||||
|
- name: Upload lockfile
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: lockfile
|
||||||
|
path: tools/lockfile/.terraform.lock.hcl
|
||||||
|
|
||||||
|
|
||||||
examples-blueprints:
|
examples-blueprints:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup-tf-providers
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
@ -43,10 +79,11 @@ jobs:
|
||||||
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
||||||
|
|
||||||
- name: Run tests on documentation examples
|
- name: Run tests on documentation examples
|
||||||
run: pytest -vv -k blueprints/ tests/examples
|
run: pytest -vv -n4 -k blueprints/ tests/examples
|
||||||
|
|
||||||
examples-modules:
|
examples-modules:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup-tf-providers
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
@ -57,10 +94,11 @@ jobs:
|
||||||
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
||||||
|
|
||||||
- name: Run tests on documentation examples
|
- name: Run tests on documentation examples
|
||||||
run: pytest -vv -k modules/ tests/examples
|
run: pytest -vv -n4 -k modules/ tests/examples
|
||||||
|
|
||||||
blueprints:
|
blueprints:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup-tf-providers
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
@ -71,10 +109,11 @@ jobs:
|
||||||
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
||||||
|
|
||||||
- name: Run tests environments
|
- name: Run tests environments
|
||||||
run: pytest -vv tests/blueprints
|
run: pytest -vv -n4 tests/blueprints
|
||||||
|
|
||||||
modules:
|
modules:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup-tf-providers
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
@ -85,10 +124,11 @@ jobs:
|
||||||
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
||||||
|
|
||||||
- name: Run tests modules
|
- name: Run tests modules
|
||||||
run: pytest -vv tests/modules
|
run: pytest -vv -n4 tests/modules
|
||||||
|
|
||||||
fast:
|
fast:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup-tf-providers
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
@ -99,4 +139,4 @@ jobs:
|
||||||
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
TERRAFORM_VERSION: ${{ env.TERRAFORM_VERSION }}
|
||||||
|
|
||||||
- name: Run tests on FAST stages
|
- name: Run tests on FAST stages
|
||||||
run: pytest -vv tests/fast
|
run: pytest -vv -n4 tests/fast
|
||||||
|
|
111
CHANGELOG.md
111
CHANGELOG.md
|
@ -4,10 +4,104 @@ All notable changes to this project will be documented in this file.
|
||||||
<!-- markdownlint-disable MD024 -->
|
<!-- markdownlint-disable MD024 -->
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
<!-- None < 2023-02-04 13:47:22+00:00 -->
|
<!-- None < 2023-03-24 12:44:02+00:00 -->
|
||||||
|
|
||||||
### BLUEPRINTS
|
### BLUEPRINTS
|
||||||
|
|
||||||
|
- [[#1355](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1355)] Fix Shielded Folder - VertexML interoperability ([lcaggio](https://github.com/lcaggio)) <!-- 2023-05-05 07:54:57+00:00 -->
|
||||||
|
- [[#1353](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1353)] fix in IAM binding of Apigee BigQuery analytics blueprint ([apichick](https://github.com/apichick)) <!-- 2023-05-03 16:31:57+00:00 -->
|
||||||
|
- [[#1346](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1346)] **incompatible change:** FAST: shorten stage 3 prefixes, enforce prefix length in stage 3s ([ludoo](https://github.com/ludoo)) <!-- 2023-05-03 05:39:41+00:00 -->
|
||||||
|
- [[#1345](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1345)] chore: update metadata schema ([bharathkkb](https://github.com/bharathkkb)) <!-- 2023-04-28 22:14:21+00:00 -->
|
||||||
|
- [[#1343](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1343)] Fix because of changes in the cloud functions module and the Apigee a… ([apichick](https://github.com/apichick)) <!-- 2023-04-27 12:53:51+00:00 -->
|
||||||
|
- [[#1342](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1342)] Add directory to vertex-mlops blueprint metadata ([juliocc](https://github.com/juliocc)) <!-- 2023-04-27 07:27:31+00:00 -->
|
||||||
|
- [[#1337](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1337)] Improve Vertex mlops blueprint ([lcaggio](https://github.com/lcaggio)) <!-- 2023-04-24 19:01:40+00:00 -->
|
||||||
|
- [[#1338](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1338)] Set all resource requests to the autopilot minimum as the existing va… ([apichick](https://github.com/apichick)) <!-- 2023-04-21 12:26:49+00:00 -->
|
||||||
|
- [[#1330](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1330)] Separating GKE Standard and Autopilot Modules ([avinashkumar1289](https://github.com/avinashkumar1289)) <!-- 2023-04-21 12:08:14+00:00 -->
|
||||||
|
- [[#1334](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1334)] Rename mlops blueprint providers file ([ludoo](https://github.com/ludoo)) <!-- 2023-04-18 09:44:09+00:00 -->
|
||||||
|
- [[#1333](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1333)] Add providers to vertex-mlops blueprint ([juliocc](https://github.com/juliocc)) <!-- 2023-04-18 08:05:15+00:00 -->
|
||||||
|
- [[#1331](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1331)] IAP for Cloud Run GA ([juliodiez](https://github.com/juliodiez)) <!-- 2023-04-17 14:43:08+00:00 -->
|
||||||
|
- [[#1309](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1309)] [DataPlatform] Fix data-eng role on orchestration project ([lcaggio](https://github.com/lcaggio)) <!-- 2023-04-12 14:23:01+00:00 -->
|
||||||
|
- [[#1323](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1323)] fix: create log-export-dataset on shielded-folder when no ecryption keys are defined ([bgdanix](https://github.com/bgdanix)) <!-- 2023-04-12 13:43:25+00:00 -->
|
||||||
|
- [[#1319](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1319)] Fixed wait_time in locust script ([apichick](https://github.com/apichick)) <!-- 2023-04-12 08:39:45+00:00 -->
|
||||||
|
- [[#1312](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1312)] add firewall enforcement variable to VPC ([fawzihmouda](https://github.com/fawzihmouda)) <!-- 2023-04-11 14:09:38+00:00 -->
|
||||||
|
- [[#1305](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1305)] add missing enable_addons reference in gke blueprint for multitenant-… ([jackspyder](https://github.com/jackspyder)) <!-- 2023-04-11 13:15:39+00:00 -->
|
||||||
|
- [[#1306](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1306)] Support new fields in bigquery module, bump provider versions, unpin local provider ([ludoo](https://github.com/ludoo)) <!-- 2023-04-05 14:22:53+00:00 -->
|
||||||
|
- [[#1293](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1293)] Refactor cloud run module to use optionals and support all features ([ludoo](https://github.com/ludoo)) <!-- 2023-04-01 12:06:30+00:00 -->
|
||||||
|
- [[#1289](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1289)] **incompatible change:** Network Dashboard improvements and bug fixing ([simonebruzzechesse](https://github.com/simonebruzzechesse)) <!-- 2023-03-29 12:54:07+00:00 -->
|
||||||
|
- [[#1283](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1283)] Fixed permissions of files created ([apichick](https://github.com/apichick)) <!-- 2023-03-27 19:33:49+00:00 -->
|
||||||
|
- [[#1274](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1274)] Add support for VPC Connector and different monitoring project to network dashboard deploy ([ludoo](https://github.com/ludoo)) <!-- 2023-03-24 14:29:13+00:00 -->
|
||||||
|
|
||||||
|
### DOCUMENTATION
|
||||||
|
|
||||||
|
- [[#1357](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1357)] Add module link to README ([prabhaarya](https://github.com/prabhaarya)) <!-- 2023-05-05 08:10:09+00:00 -->
|
||||||
|
- [[#1347](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1347)] Fix external documentation links ([bobidle](https://github.com/bobidle)) <!-- 2023-05-02 05:26:58+00:00 -->
|
||||||
|
- [[#1330](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1330)] Separating GKE Standard and Autopilot Modules ([avinashkumar1289](https://github.com/avinashkumar1289)) <!-- 2023-04-21 12:08:14+00:00 -->
|
||||||
|
- [[#1309](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1309)] [DataPlatform] Fix data-eng role on orchestration project ([lcaggio](https://github.com/lcaggio)) <!-- 2023-04-12 14:23:01+00:00 -->
|
||||||
|
- [[#1311](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1311)] Fixed type in readme for FAST stages ([derailed-dash](https://github.com/derailed-dash)) <!-- 2023-04-08 19:56:19+00:00 -->
|
||||||
|
- [[#892](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/892)] Add network NVA NCC stage ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-04-04 18:41:05+00:00 -->
|
||||||
|
- [[#1297](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1297)] Update CONTRIBUTING.md ([juliocc](https://github.com/juliocc)) <!-- 2023-04-03 12:25:08+00:00 -->
|
||||||
|
- [[#1276](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1276)] DNS Response Policy module ([ludoo](https://github.com/ludoo)) <!-- 2023-03-26 15:42:58+00:00 -->
|
||||||
|
|
||||||
|
### FAST
|
||||||
|
|
||||||
|
- [[#1352](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1352)] **incompatible change:** Switch FAST networking stages to network policies for Google domains ([ludoo](https://github.com/ludoo)) <!-- 2023-05-04 05:38:41+00:00 -->
|
||||||
|
- [[#1346](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1346)] **incompatible change:** FAST: shorten stage 3 prefixes, enforce prefix length in stage 3s ([ludoo](https://github.com/ludoo)) <!-- 2023-05-03 05:39:41+00:00 -->
|
||||||
|
- [[#1344](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1344)] Add logging details to bootstrap outputs ([juliocc](https://github.com/juliocc)) <!-- 2023-04-27 11:27:25+00:00 -->
|
||||||
|
- [[#1324](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1324)] Fix typo in FAST cicd extra stage variable name ([ludoo](https://github.com/ludoo)) <!-- 2023-04-17 07:40:05+00:00 -->
|
||||||
|
- [[#1328](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1328)] Strip org name from deploy key repo in FAST cicd stage ([ludoo](https://github.com/ludoo)) <!-- 2023-04-17 06:59:08+00:00 -->
|
||||||
|
- [[#1318](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1318)] Allow longer org prefix plus tenant prefix ([derailed-dash](https://github.com/derailed-dash)) <!-- 2023-04-11 23:36:37+00:00 -->
|
||||||
|
- [[#1315](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1315)] Fix stage links script for multitenant stages ([ludoo](https://github.com/ludoo)) <!-- 2023-04-11 09:43:39+00:00 -->
|
||||||
|
- [[#1313](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1313)] Fixed typo in readme for FAST multitenant ([derailed-dash](https://github.com/derailed-dash)) <!-- 2023-04-11 02:47:04+00:00 -->
|
||||||
|
- [[#892](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/892)] Add network NVA NCC stage ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-04-04 18:41:05+00:00 -->
|
||||||
|
- [[#1285](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1285)] Update YAML schema for hierarchical firewall rules ([sruffilli](https://github.com/sruffilli)) <!-- 2023-03-30 06:30:53+00:00 -->
|
||||||
|
- [[#1284](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1284)] Update Provider and Terraform variables section in FAST project factory ([gcardamone](https://github.com/gcardamone)) <!-- 2023-03-28 14:18:45+00:00 -->
|
||||||
|
|
||||||
|
### MODULES
|
||||||
|
|
||||||
|
- [[#1329](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1329)] fix: Change net-glb serve_while_stale type to number ([tobbbles](https://github.com/tobbbles)) <!-- 2023-05-05 07:41:13+00:00 -->
|
||||||
|
- [[#1308](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1308)] Add cloud dataplex module ([prabhaarya](https://github.com/prabhaarya)) <!-- 2023-05-05 07:26:46+00:00 -->
|
||||||
|
- [[#1352](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1352)] **incompatible change:** Switch FAST networking stages to network policies for Google domains ([ludoo](https://github.com/ludoo)) <!-- 2023-05-04 05:38:41+00:00 -->
|
||||||
|
- [[#1349](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1349)] Enhance GKE Backup Configuration Support ([tacchino](https://github.com/tacchino)) <!-- 2023-05-02 14:59:12+00:00 -->
|
||||||
|
- [[#1348](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1348)] Ignore entire node config in standard cluster ([ludoo](https://github.com/ludoo)) <!-- 2023-05-02 13:23:03+00:00 -->
|
||||||
|
- [[#1337](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1337)] Improve Vertex mlops blueprint ([lcaggio](https://github.com/lcaggio)) <!-- 2023-04-24 19:01:40+00:00 -->
|
||||||
|
- [[#1330](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1330)] Separating GKE Standard and Autopilot Modules ([avinashkumar1289](https://github.com/avinashkumar1289)) <!-- 2023-04-21 12:08:14+00:00 -->
|
||||||
|
- [[#1336](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1336)] Certificate renewal through terraform ([bjohnrl](https://github.com/bjohnrl)) <!-- 2023-04-19 09:20:01+00:00 -->
|
||||||
|
- [[#1335](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1335)] Inconsistent conditional result types error in net-vpc module ([jamesmao-xyz](https://github.com/jamesmao-xyz)) <!-- 2023-04-18 11:07:17+00:00 -->
|
||||||
|
- [[#1332](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1332)] Add CMEK support on Secret manager module ([lcaggio](https://github.com/lcaggio)) <!-- 2023-04-18 05:05:10+00:00 -->
|
||||||
|
- [[#1326](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1326)] Remove net-interconnect-attachment-direct ([juliocc](https://github.com/juliocc)) <!-- 2023-04-14 09:28:26+00:00 -->
|
||||||
|
- [[#1322](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1322)] Add inventories to net-vpc-firewall tests ([juliocc](https://github.com/juliocc)) <!-- 2023-04-12 12:27:34+00:00 -->
|
||||||
|
- [[#1320](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1320)] issue #1303: net-vpc-firewall module supporting source and destination ranges ([ajlopezn](https://github.com/ajlopezn)) <!-- 2023-04-12 10:32:18+00:00 -->
|
||||||
|
- [[#1312](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1312)] add firewall enforcement variable to VPC ([fawzihmouda](https://github.com/fawzihmouda)) <!-- 2023-04-11 14:09:38+00:00 -->
|
||||||
|
- [[#1310](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1310)] Use labels var in cloud-run module ([LiuVII](https://github.com/LiuVII)) <!-- 2023-04-11 03:06:13+00:00 -->
|
||||||
|
- [[#1306](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1306)] Support new fields in bigquery module, bump provider versions, unpin local provider ([ludoo](https://github.com/ludoo)) <!-- 2023-04-05 14:22:53+00:00 -->
|
||||||
|
- [[#1301](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1301)] Add ability to run vtysh from simple-nva vm directly when frr is active ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-04-03 19:37:02+00:00 -->
|
||||||
|
- [[#1300](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1300)] Fix vtysh ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-04-03 14:37:46+00:00 -->
|
||||||
|
- [[#1299](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1299)] Fix urlmap in ILB L7 module ([ludoo](https://github.com/ludoo)) <!-- 2023-04-03 13:47:38+00:00 -->
|
||||||
|
- [[#1298](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1298)] Add sample vtysh file to remove warnings ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-04-03 13:10:47+00:00 -->
|
||||||
|
- [[#1293](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1293)] Refactor cloud run module to use optionals and support all features ([ludoo](https://github.com/ludoo)) <!-- 2023-04-01 12:06:30+00:00 -->
|
||||||
|
- [[#1287](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1287)] **incompatible change:** Add support for backup and remove deprecated control plane field in GKE module ([valeriobponza](https://github.com/valeriobponza)) <!-- 2023-03-30 10:47:40+00:00 -->
|
||||||
|
- [[#1295](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1295)] Load all service agents identities from yaml ([juliocc](https://github.com/juliocc)) <!-- 2023-03-30 07:02:05+00:00 -->
|
||||||
|
- [[#1294](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1294)] Add Cloud Batch service identity ([wiktorn](https://github.com/wiktorn)) <!-- 2023-03-30 06:05:12+00:00 -->
|
||||||
|
- [[#1280](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1280)] Add Dataplex Service Identity ([wiktorn](https://github.com/wiktorn)) <!-- 2023-03-27 20:11:07+00:00 -->
|
||||||
|
- [[#1282](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1282)] Added local firewall management (iptables) on the NVA for dealing with COS default deny on inbound connections ([simonebruzzechesse](https://github.com/simonebruzzechesse)) <!-- 2023-03-27 14:32:57+00:00 -->
|
||||||
|
- [[#1281](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1281)] Use unique bundle name for Cloud Function ([wiktorn](https://github.com/wiktorn)) <!-- 2023-03-27 12:13:38+00:00 -->
|
||||||
|
- [[#1278](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1278)] DNS policy module fixes ([ludoo](https://github.com/ludoo)) <!-- 2023-03-26 16:39:07+00:00 -->
|
||||||
|
- [[#1276](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1276)] DNS Response Policy module ([ludoo](https://github.com/ludoo)) <!-- 2023-03-26 15:42:58+00:00 -->
|
||||||
|
|
||||||
|
### TOOLS
|
||||||
|
|
||||||
|
- [[#1340](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1340)] Extend tests to use lockfile if available ([juliocc](https://github.com/juliocc)) <!-- 2023-04-26 09:10:13+00:00 -->
|
||||||
|
- [[#1339](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1339)] Deprecate plan runner fixture and all its variants ([juliocc](https://github.com/juliocc)) <!-- 2023-04-22 11:43:51+00:00 -->
|
||||||
|
- [[#1327](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1327)] Migrate more tests ([juliocc](https://github.com/juliocc)) <!-- 2023-04-17 07:18:07+00:00 -->
|
||||||
|
- [[#1307](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1307)] Bump Terraform version ([ludoo](https://github.com/ludoo)) <!-- 2023-04-05 07:15:23+00:00 -->
|
||||||
|
|
||||||
|
## [21.0.0] - 2023-03-24
|
||||||
|
<!-- 2023-03-24 12:44:02+00:00 < 2023-02-04 13:47:22+00:00 -->
|
||||||
|
|
||||||
|
### BLUEPRINTS
|
||||||
|
|
||||||
|
- [[#1272](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1272)] Removed repeated command in script used to deploy API proxy ([apichick](https://github.com/apichick)) <!-- 2023-03-22 10:16:39+00:00 -->
|
||||||
|
- [[#1261](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1261)] Fix variable terraform.tfvars.sample ([dedeco](https://github.com/dedeco)) <!-- 2023-03-17 10:13:11+00:00 -->
|
||||||
- [[#1257](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1257)] Fixes related to boot_disk in compute-vm module ([apichick](https://github.com/apichick)) <!-- 2023-03-16 15:24:26+00:00 -->
|
- [[#1257](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1257)] Fixes related to boot_disk in compute-vm module ([apichick](https://github.com/apichick)) <!-- 2023-03-16 15:24:26+00:00 -->
|
||||||
- [[#1256](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1256)] **incompatible change:** Pin local provider ([ludoo](https://github.com/ludoo)) <!-- 2023-03-16 10:59:07+00:00 -->
|
- [[#1256](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1256)] **incompatible change:** Pin local provider ([ludoo](https://github.com/ludoo)) <!-- 2023-03-16 10:59:07+00:00 -->
|
||||||
- [[#1245](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1245)] Composer-2 - Fix 1236 ([lcaggio](https://github.com/lcaggio)) <!-- 2023-03-13 20:48:22+00:00 -->
|
- [[#1245](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1245)] Composer-2 - Fix 1236 ([lcaggio](https://github.com/lcaggio)) <!-- 2023-03-13 20:48:22+00:00 -->
|
||||||
|
@ -63,6 +157,10 @@ All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
### FAST
|
### FAST
|
||||||
|
|
||||||
|
- [[#1266](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1266)] FAST plugin system ([ludoo](https://github.com/ludoo)) <!-- 2023-03-24 12:28:32+00:00 -->
|
||||||
|
- [[#1273](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1273)] Small fixes to FAST Networking stage with NVAs ([simonebruzzechesse](https://github.com/simonebruzzechesse)) <!-- 2023-03-23 08:57:01+00:00 -->
|
||||||
|
- [[#1265](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1265)] Fix FAST hub and spoke with VPN networking stage ([ludoo](https://github.com/ludoo)) <!-- 2023-03-17 19:52:40+00:00 -->
|
||||||
|
- [[#1263](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1263)] Widen scope for prod project factory SA to dev ([ludoo](https://github.com/ludoo)) <!-- 2023-03-17 16:24:56+00:00 -->
|
||||||
- [[#1240](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1240)] feat: Enable populating of data directory and .sample files and update dependencies in 0-cicd-github ([antonkovach](https://github.com/antonkovach)) <!-- 2023-03-15 13:55:08+00:00 -->
|
- [[#1240](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1240)] feat: Enable populating of data directory and .sample files and update dependencies in 0-cicd-github ([antonkovach](https://github.com/antonkovach)) <!-- 2023-03-15 13:55:08+00:00 -->
|
||||||
- [[#1249](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1249)] Document need to set `outputs_location` explicitly in every stage ([ludoo](https://github.com/ludoo)) <!-- 2023-03-15 10:43:44+00:00 -->
|
- [[#1249](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1249)] Document need to set `outputs_location` explicitly in every stage ([ludoo](https://github.com/ludoo)) <!-- 2023-03-15 10:43:44+00:00 -->
|
||||||
- [[#1247](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1247)] Fast: resman: location and storage class added to GKE GCS buckets ([skalolazka](https://github.com/skalolazka)) <!-- 2023-03-14 15:37:16+00:00 -->
|
- [[#1247](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1247)] Fast: resman: location and storage class added to GKE GCS buckets ([skalolazka](https://github.com/skalolazka)) <!-- 2023-03-14 15:37:16+00:00 -->
|
||||||
|
@ -92,6 +190,13 @@ All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
### MODULES
|
### MODULES
|
||||||
|
|
||||||
|
- [[#1270](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1270)] Add static gateway id to outputs of VPN ha module ([ludoo](https://github.com/ludoo)) <!-- 2023-03-21 17:08:46+00:00 -->
|
||||||
|
- [[#1269](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1269)] Ignore changes to metadata.0.annotations in Cloud Run module ([juliocc](https://github.com/juliocc)) <!-- 2023-03-21 11:21:59+00:00 -->
|
||||||
|
- [[#1267](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1267)] Improvements to NCC-RA spoke module. ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-03-21 07:07:44+00:00 -->
|
||||||
|
- [[#1268](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1268)] simple-nva: add ability to parse BGP configs as strings. ([LucaPrete](https://github.com/LucaPrete)) <!-- 2023-03-21 06:41:13+00:00 -->
|
||||||
|
- [[#1258](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1258)] Add backend service names to outputs for net-glb and net-ilb-l7 ([rosmo](https://github.com/rosmo)) <!-- 2023-03-17 10:40:11+00:00 -->
|
||||||
|
- [[#1259](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1259)] Add support for `iam_additive` and simplify factory interface in net VPC module ([ludoo](https://github.com/ludoo)) <!-- 2023-03-17 10:12:35+00:00 -->
|
||||||
|
- [[#1255](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1255)] **incompatible change:** Change `target_vpcs` variable in firewall policy module to support dynamic values ([ludoo](https://github.com/ludoo)) <!-- 2023-03-17 07:14:10+00:00 -->
|
||||||
- [[#1256](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1256)] **incompatible change:** Pin local provider ([ludoo](https://github.com/ludoo)) <!-- 2023-03-16 10:59:07+00:00 -->
|
- [[#1256](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1256)] **incompatible change:** Pin local provider ([ludoo](https://github.com/ludoo)) <!-- 2023-03-16 10:59:07+00:00 -->
|
||||||
- [[#1246](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1246)] Delay creation of SVPC host bindings until APIs and JIT SAs are done ([juliocc](https://github.com/juliocc)) <!-- 2023-03-14 14:16:59+00:00 -->
|
- [[#1246](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1246)] Delay creation of SVPC host bindings until APIs and JIT SAs are done ([juliocc](https://github.com/juliocc)) <!-- 2023-03-14 14:16:59+00:00 -->
|
||||||
- [[#1241](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1241)] **incompatible change:** Allow using existing boot disk in compute-vm module ([ludoo](https://github.com/ludoo)) <!-- 2023-03-12 09:54:00+00:00 -->
|
- [[#1241](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1241)] **incompatible change:** Allow using existing boot disk in compute-vm module ([ludoo](https://github.com/ludoo)) <!-- 2023-03-12 09:54:00+00:00 -->
|
||||||
|
@ -141,6 +246,7 @@ All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
### TOOLS
|
### TOOLS
|
||||||
|
|
||||||
|
- [[#1266](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1266)] FAST plugin system ([ludoo](https://github.com/ludoo)) <!-- 2023-03-24 12:28:32+00:00 -->
|
||||||
- [[#1242](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1242)] Remove container image workflows ([kunzese](https://github.com/kunzese)) <!-- 2023-03-13 07:39:04+00:00 -->
|
- [[#1242](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1242)] Remove container image workflows ([kunzese](https://github.com/kunzese)) <!-- 2023-03-13 07:39:04+00:00 -->
|
||||||
- [[#1231](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1231)] Simplify testing workflow ([juliocc](https://github.com/juliocc)) <!-- 2023-03-09 15:27:05+00:00 -->
|
- [[#1231](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1231)] Simplify testing workflow ([juliocc](https://github.com/juliocc)) <!-- 2023-03-09 15:27:05+00:00 -->
|
||||||
- [[#1216](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1216)] Use composite action for test workflow prerequisite steps ([ludoo](https://github.com/ludoo)) <!-- 2023-03-06 10:44:58+00:00 -->
|
- [[#1216](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/pull/1216)] Use composite action for test workflow prerequisite steps ([ludoo](https://github.com/ludoo)) <!-- 2023-03-06 10:44:58+00:00 -->
|
||||||
|
@ -1082,7 +1188,8 @@ All notable changes to this project will be documented in this file.
|
||||||
- merge development branch with suite of new modules and end-to-end examples
|
- merge development branch with suite of new modules and end-to-end examples
|
||||||
|
|
||||||
<!-- markdown-link-check-disable -->
|
<!-- markdown-link-check-disable -->
|
||||||
[Unreleased]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v20.0.0...HEAD
|
[Unreleased]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v21.0.0...HEAD
|
||||||
|
[21.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v20.0.0...v21.0.0
|
||||||
[20.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v19.0.0...v20.0.0
|
[20.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v19.0.0...v20.0.0
|
||||||
[19.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v18.0.0...v19.0.0
|
[19.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v18.0.0...v19.0.0
|
||||||
[18.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v16.0.0...v18.0.0
|
[18.0.0]: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/compare/v16.0.0...v18.0.0
|
||||||
|
|
|
@ -4,26 +4,26 @@ Contributors are the engine that keeps Fabric alive so if you were or are planni
|
||||||
|
|
||||||
## Table of Contents
|
## Table of Contents
|
||||||
|
|
||||||
* [I just found a bug / have a feature request](#i-just-found-a-bug---have-a-feature-request)
|
- [I just found a bug / have a feature request](#i-just-found-a-bug--have-a-feature-request)
|
||||||
* [Quick developer workflow](#quick-developer-workflow)
|
- [Quick developer workflow](#quick-developer-workflow)
|
||||||
* [Developer's handbook](#developers-handbook)
|
- [Developer's handbook](#developers-handbook)
|
||||||
+ [The Zen of Fabric](#the-zen-of-fabric)
|
* [The Zen of Fabric](#the-zen-of-fabric)
|
||||||
+ [Design principles in action](#design-principles-in-action)
|
* [Design principles in action](#design-principles-in-action)
|
||||||
+ [FAST stage design](#fast-stage-design)
|
* [FAST stage design](#fast-stage-design)
|
||||||
+ [Style guide reference](#style-guide-reference)
|
* [Style guide reference](#style-guide-reference)
|
||||||
+ [Interacting with checks and tools](#interacting-with-checks-and-tools)
|
* [Interacting with checks and tools](#interacting-with-checks-and-tools)
|
||||||
* [Using and writing tests](#using-and-writing-tests)
|
- [Using and writing tests](#using-and-writing-tests)
|
||||||
+ [Testing via README.md example blocks.](#testing-via-readmemd-example-blocks)
|
* [Testing via README.md example blocks.](#testing-via-readmemd-example-blocks)
|
||||||
- [Testing examples against an inventory YAML](#testing-examples-against-an-inventory-yaml)
|
+ [Testing examples against an inventory YAML](#testing-examples-against-an-inventory-yaml)
|
||||||
- [Using external files](#using-external-files)
|
+ [Using external files](#using-external-files)
|
||||||
- [Running tests for specific examples](#running-tests-for-specific-examples)
|
+ [Running tests for specific examples](#running-tests-for-specific-examples)
|
||||||
- [Generating the inventory automatically](#generating-the-inventory-automatically)
|
+ [Generating the inventory automatically](#generating-the-inventory-automatically)
|
||||||
- [Building tests for blueprints](#building-tests-for-blueprints)
|
+ [Building tests for blueprints](#building-tests-for-blueprints)
|
||||||
+ [Testing via `tfvars` and `yaml` (aka `tftest`-based tests)](#testing-via--tfvars--and--yaml---aka--tftest--based-tests-)
|
* [Testing via `tfvars` and `yaml` (aka `tftest`-based tests)](#testing-via-tfvars-and-yaml-aka-tftest-based-tests)
|
||||||
- [Generating the inventory for `tftest`-based tests](#generating-the-inventory-for--tftest--based-tests)
|
+ [Generating the inventory for `tftest`-based tests](#generating-the-inventory-for-tftest-based-tests)
|
||||||
+ [Writing tests in Python (legacy approach)](#writing-tests-in-python--legacy-approach-)
|
* [Writing tests in Python (legacy approach)](#writing-tests-in-python-legacy-approach)
|
||||||
+ [Running tests from a temporary directory](#running-tests-from-a-temporary-directory)
|
* [Running tests from a temporary directory](#running-tests-from-a-temporary-directory)
|
||||||
* [Fabric tools](#fabric-tools)
|
- [Fabric tools](#fabric-tools)
|
||||||
|
|
||||||
## I just found a bug / have a feature request
|
## I just found a bug / have a feature request
|
||||||
|
|
||||||
|
@ -123,6 +123,10 @@ This section illustrates how our design principles translate into actual code. W
|
||||||
|
|
||||||
#### Design by logical entity instead of product/feature
|
#### Design by logical entity instead of product/feature
|
||||||
|
|
||||||
|
> “The most fundamental problem in computer science is problem decomposition: how to take a complex problem and divide it up into pieces that can be solved independently.”
|
||||||
|
>
|
||||||
|
> — John Ousterhout in "A Philosophy of Software Design"
|
||||||
|
|
||||||
This is probably our oldest and most important design principle. When designing a module or a FAST stage we look at its domain from a functional point of view: **what is the subset of resources (or modules for FAST) that fully describes one entity and allows encapsulating its full configuration?**
|
This is probably our oldest and most important design principle. When designing a module or a FAST stage we look at its domain from a functional point of view: **what is the subset of resources (or modules for FAST) that fully describes one entity and allows encapsulating its full configuration?**
|
||||||
|
|
||||||
It's a radically different approach from designing by product or feature, where boundaries are drawn around a single GCP functionality.
|
It's a radically different approach from designing by product or feature, where boundaries are drawn around a single GCP functionality.
|
||||||
|
@ -200,6 +204,12 @@ We have several such interfaces defined for IAM, log sinks, organizational polic
|
||||||
|
|
||||||
#### Design interfaces to support actual usage
|
#### Design interfaces to support actual usage
|
||||||
|
|
||||||
|
> “When developing a module, look for opportunities to take a little bit of extra suffering upon yourself in order to reduce the suffering of your users.”
|
||||||
|
>
|
||||||
|
> “Providing choice is good, but interfaces should be designed to make the common case as simple as possible”
|
||||||
|
>
|
||||||
|
> — John Ousterhout in "A Philosophy of Software Design"
|
||||||
|
|
||||||
Variables should not simply map to the underlying resource attributes, but their **interfaces should be designed to match common use cases** to reduce friction and offer the highest possible degree of legibility.
|
Variables should not simply map to the underlying resource attributes, but their **interfaces should be designed to match common use cases** to reduce friction and offer the highest possible degree of legibility.
|
||||||
|
|
||||||
This translates into different practical approaches:
|
This translates into different practical approaches:
|
||||||
|
@ -286,6 +296,11 @@ module "project" {
|
||||||
|
|
||||||
#### Design compact variable spaces
|
#### Design compact variable spaces
|
||||||
|
|
||||||
|
> "The best modules are those whose interfaces are much simpler than their implementations"
|
||||||
|
>
|
||||||
|
> — John Ousterhout in "A Philosophy of Software Design"
|
||||||
|
|
||||||
|
|
||||||
Designing variable spaces is one of the most complex aspects to get right, as they are the main entry point through which users consume modules, examples and FAST stages. We always strive to **design small variable spaces by leveraging objects and implementing defaults** so that users can quickly produce highly readable code.
|
Designing variable spaces is one of the most complex aspects to get right, as they are the main entry point through which users consume modules, examples and FAST stages. We always strive to **design small variable spaces by leveraging objects and implementing defaults** so that users can quickly produce highly readable code.
|
||||||
|
|
||||||
One of many examples of this approach comes from disk support in the `compute-vm` module, where preset defaults allow quick VM management with very few lines of code, and optional variables allow progressively expanding the code when more control is needed.
|
One of many examples of this approach comes from disk support in the `compute-vm` module, where preset defaults allow quick VM management with very few lines of code, and optional variables allow progressively expanding the code when more control is needed.
|
||||||
|
@ -413,6 +428,10 @@ Each FAST stage should be designed so that it can optionally be used in isolatio
|
||||||
|
|
||||||
#### Stage interfaces
|
#### Stage interfaces
|
||||||
|
|
||||||
|
> “The best modules are those that provide powerful functionality yet have simple interfaces.”
|
||||||
|
>
|
||||||
|
> — John Ousterhout in "A Philosophy of Software Design"
|
||||||
|
|
||||||
Stages are designed based on the concept of ["contracts" or interfaces](./fast/README.md#contracts-and-stages), which define what information is produced by one stage via outputs, which is then consumed by subsequent stages via variables.
|
Stages are designed based on the concept of ["contracts" or interfaces](./fast/README.md#contracts-and-stages), which define what information is produced by one stage via outputs, which is then consumed by subsequent stages via variables.
|
||||||
|
|
||||||
Interfaces are compact in size (few variables) but broad in scope (variables typically leverage maps), so that consumers can declare in variable types only the bits of information they are interested in.
|
Interfaces are compact in size (few variables) but broad in scope (variables typically leverage maps), so that consumers can declare in variable types only the bits of information they are interested in.
|
||||||
|
@ -962,13 +981,17 @@ tests:
|
||||||
# run a test named `test-plan`, load the specified tfvars files
|
# run a test named `test-plan`, load the specified tfvars files
|
||||||
# use the default inventory file of `test-plan.yaml`
|
# use the default inventory file of `test-plan.yaml`
|
||||||
test-plan:
|
test-plan:
|
||||||
tfvars: # if ommited, we load test-plan.tfvars by default
|
tfvars: # if omitted, we load test-plan.tfvars by default
|
||||||
- test-plan.tfvars
|
- test-plan.tfvars
|
||||||
- test-plan-extra.tfvars
|
- test-plan-extra.tfvars
|
||||||
inventory:
|
inventory:
|
||||||
- test-plan.yaml
|
- test-plan.yaml
|
||||||
|
# You can use `extra_files` to include additional tf files outside
|
||||||
|
# the module's path before running the test.
|
||||||
|
# extra_files:
|
||||||
|
# - ../plugin-x/*.tf
|
||||||
|
|
||||||
# You can ommit the tfvars and inventory sections and they will
|
# You can omit the tfvars and inventory sections and they will
|
||||||
# default to the name of the test. The following two examples are equivalent:
|
# default to the name of the test. The following two examples are equivalent:
|
||||||
#
|
#
|
||||||
# test-plan2:
|
# test-plan2:
|
||||||
|
|
|
@ -161,4 +161,4 @@ Even with all the above points, it may be hard to make a decision. While the mod
|
||||||
|
|
||||||
* Since modules work well together within their ecosystem, select logical boundaries for using Fabric or CFT. For example use CFT for deploying resources within projects but use Fabric for managing project creation and IAM.
|
* Since modules work well together within their ecosystem, select logical boundaries for using Fabric or CFT. For example use CFT for deploying resources within projects but use Fabric for managing project creation and IAM.
|
||||||
* Use strengths of each collection of modules to your advantage. Empower application teams to define their infrastructure as code using off the shelf CFT modules. Using Fabric, bootstrap your platform team with a collection of tailor built modules for your organization.
|
* Use strengths of each collection of modules to your advantage. Empower application teams to define their infrastructure as code using off the shelf CFT modules. Using Fabric, bootstrap your platform team with a collection of tailor built modules for your organization.
|
||||||
* Lean into module composition and dependency inversion that both Fabric and CFT modules follow. For example, you can create a GKE cluster using either [Fabric](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/modules/gke-cluster#gke-cluster-module) or [CFT](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine) GKE module and then use either [Fabric](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/modules/gke-hub#variables) or [CFT](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/fleet-membership) for setting up GKE Hub by passing in outputs from the GKE module.
|
* Lean into module composition and dependency inversion that both Fabric and CFT modules follow. For example, you can create a GKE cluster using either [Fabric](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/modules/gke-cluster-standard#gke-cluster-module) or [CFT](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine) GKE module and then use either [Fabric](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/modules/gke-hub#variables) or [CFT](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/fleet-membership) for setting up GKE Hub by passing in outputs from the GKE module.
|
||||||
|
|
|
@ -30,9 +30,9 @@ The current list of modules supports most of the core foundational and networkin
|
||||||
Currently available modules:
|
Currently available modules:
|
||||||
|
|
||||||
- **foundational** - [billing budget](./modules/billing-budget), [Cloud Identity group](./modules/cloud-identity-group/), [folder](./modules/folder), [service accounts](./modules/iam-service-account), [logging bucket](./modules/logging-bucket), [organization](./modules/organization), [project](./modules/project), [projects-data-source](./modules/projects-data-source)
|
- **foundational** - [billing budget](./modules/billing-budget), [Cloud Identity group](./modules/cloud-identity-group/), [folder](./modules/folder), [service accounts](./modules/iam-service-account), [logging bucket](./modules/logging-bucket), [organization](./modules/organization), [project](./modules/project), [projects-data-source](./modules/projects-data-source)
|
||||||
- **networking** - [DNS](./modules/dns), [Cloud Endpoints](./modules/endpoints), [address reservation](./modules/net-address), [NAT](./modules/net-cloudnat), [Global Load Balancer (classic)](./modules/net-glb/), [L4 ILB](./modules/net-ilb), [L7 ILB](./modules/net-ilb-l7), [VPC](./modules/net-vpc), [VPC firewall](./modules/net-vpc-firewall), [VPC firewall policy](./modules/net-vpc-firewall-policy), [VPC peering](./modules/net-vpc-peering), [VPN dynamic](./modules/net-vpn-dynamic), [HA VPN](./modules/net-vpn-ha), [VPN static](./modules/net-vpn-static), [Service Directory](./modules/service-directory)
|
- **networking** - [DNS](./modules/dns), [DNS Response Policy](./modules/dns-response-policy/), [Cloud Endpoints](./modules/endpoints), [address reservation](./modules/net-address), [NAT](./modules/net-cloudnat), [Global Load Balancer (classic)](./modules/net-glb/), [L4 ILB](./modules/net-ilb), [L7 ILB](./modules/net-ilb-l7), [VPC](./modules/net-vpc), [VPC firewall](./modules/net-vpc-firewall), [VPC firewall policy](./modules/net-vpc-firewall-policy), [VPC peering](./modules/net-vpc-peering), [VPN dynamic](./modules/net-vpn-dynamic), [HA VPN](./modules/net-vpn-ha), [VPN static](./modules/net-vpn-static), [Service Directory](./modules/service-directory)
|
||||||
- **compute** - [VM/VM group](./modules/compute-vm), [MIG](./modules/compute-mig), [COS container](./modules/cloud-config-container/cos-generic-metadata/) (coredns, mysql, onprem, squid), [GKE cluster](./modules/gke-cluster), [GKE hub](./modules/gke-hub), [GKE nodepool](./modules/gke-nodepool)
|
- **compute** - [VM/VM group](./modules/compute-vm), [MIG](./modules/compute-mig), [COS container](./modules/cloud-config-container/cos-generic-metadata/) (coredns, mysql, onprem, squid), [GKE cluster](./modules/gke-cluster-standard), [GKE hub](./modules/gke-hub), [GKE nodepool](./modules/gke-nodepool)
|
||||||
- **data** - [BigQuery dataset](./modules/bigquery-dataset), [Bigtable instance](./modules/bigtable-instance), [Cloud SQL instance](./modules/cloudsql-instance), [Data Catalog Policy Tag](./modules/data-catalog-policy-tag), [Datafusion](./modules/datafusion), [Dataproc](./modules/dataproc), [GCS](./modules/gcs), [Pub/Sub](./modules/pubsub)
|
- **data** - [BigQuery dataset](./modules/bigquery-dataset), [Bigtable instance](./modules/bigtable-instance), [Cloud Dataplex](./modules/cloud-dataplex), [Cloud SQL instance](./modules/cloudsql-instance), [Data Catalog Policy Tag](./modules/data-catalog-policy-tag), [Datafusion](./modules/datafusion), [Dataproc](./modules/dataproc), [GCS](./modules/gcs), [Pub/Sub](./modules/pubsub)
|
||||||
- **development** - [API Gateway](./modules/api-gateway), [Apigee](./modules/apigee), [Artifact Registry](./modules/artifact-registry), [Container Registry](./modules/container-registry), [Cloud Source Repository](./modules/source-repository)
|
- **development** - [API Gateway](./modules/api-gateway), [Apigee](./modules/apigee), [Artifact Registry](./modules/artifact-registry), [Container Registry](./modules/container-registry), [Cloud Source Repository](./modules/source-repository)
|
||||||
- **security** - [Binauthz](./modules/binauthz/), [KMS](./modules/kms), [SecretManager](./modules/secret-manager), [VPC Service Control](./modules/vpc-sc)
|
- **security** - [Binauthz](./modules/binauthz/), [KMS](./modules/kms), [SecretManager](./modules/secret-manager), [VPC Service Control](./modules/vpc-sc)
|
||||||
- **serverless** - [Cloud Function](./modules/cloud-function), [Cloud Run](./modules/cloud-run)
|
- **serverless** - [Cloud Function](./modules/cloud-function), [Cloud Run](./modules/cloud-run)
|
||||||
|
|
|
@ -6,7 +6,7 @@ Currently available blueprints:
|
||||||
|
|
||||||
- **apigee** - [Apigee Hybrid on GKE](./apigee/hybrid-gke/), [Apigee X analytics in BigQuery](./apigee/bigquery-analytics), [Apigee network patterns](./apigee/network-patterns/)
|
- **apigee** - [Apigee Hybrid on GKE](./apigee/hybrid-gke/), [Apigee X analytics in BigQuery](./apigee/bigquery-analytics), [Apigee network patterns](./apigee/network-patterns/)
|
||||||
- **cloud operations** - [Active Directory Federation Services](./cloud-operations/adfs), [Cloud Asset Inventory feeds for resource change tracking and remediation](./cloud-operations/asset-inventory-feed-remediation), [Fine-grained Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Cloud DNS & Shared VPC design](./cloud-operations/dns-shared-vpc), [Delegated Role Grants](./cloud-operations/iam-delegated-role-grants), [Networking Dashboard](./cloud-operations/network-dashboard), [Managing on-prem service account keys by uploading public keys](./cloud-operations/onprem-sa-key-management), [Compute Image builder with Hashicorp Packer](./cloud-operations/packer-image-builder), [Packer example](./cloud-operations/packer-image-builder/packer), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq), [Configuring workload identity federation with Terraform Cloud/Enterprise workflows](./cloud-operations/terraform-cloud-dynamic-credentials), [TCP healthcheck and restart for unmanaged GCE instances](./cloud-operations/unmanaged-instances-healthcheck), [Migrate for Compute Engine (v5) blueprints](./cloud-operations/vm-migration), [Configuring workload identity federation to access Google Cloud resources from apps running on Azure](./cloud-operations/workload-identity-federation)
|
- **cloud operations** - [Active Directory Federation Services](./cloud-operations/adfs), [Cloud Asset Inventory feeds for resource change tracking and remediation](./cloud-operations/asset-inventory-feed-remediation), [Fine-grained Cloud DNS IAM via Service Directory](./cloud-operations/dns-fine-grained-iam), [Cloud DNS & Shared VPC design](./cloud-operations/dns-shared-vpc), [Delegated Role Grants](./cloud-operations/iam-delegated-role-grants), [Networking Dashboard](./cloud-operations/network-dashboard), [Managing on-prem service account keys by uploading public keys](./cloud-operations/onprem-sa-key-management), [Compute Image builder with Hashicorp Packer](./cloud-operations/packer-image-builder), [Packer example](./cloud-operations/packer-image-builder/packer), [Compute Engine quota monitoring](./cloud-operations/quota-monitoring), [Scheduled Cloud Asset Inventory Export to Bigquery](./cloud-operations/scheduled-asset-inventory-export-bq), [Configuring workload identity federation with Terraform Cloud/Enterprise workflows](./cloud-operations/terraform-cloud-dynamic-credentials), [TCP healthcheck and restart for unmanaged GCE instances](./cloud-operations/unmanaged-instances-healthcheck), [Migrate for Compute Engine (v5) blueprints](./cloud-operations/vm-migration), [Configuring workload identity federation to access Google Cloud resources from apps running on Azure](./cloud-operations/workload-identity-federation)
|
||||||
- **data solutions** - [GCE and GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms), [Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key](./data-solutions/composer-2), [Cloud SQL instance with multi-region read replicas](./data-solutions/cloudsql-multiregion), [Data Platform](./data-solutions/data-platform-foundations), [Spinning up a foundation data pipeline on Google Cloud using Cloud Storage, Dataflow and BigQuery](./data-solutions/gcs-to-bq-with-least-privileges), [#SQL Server Always On Groups blueprint](./data-solutions/sqlserver-alwayson), [Data Playground](./data-solutions/data-playground), [MLOps with Vertex AI](./data-solutions/vertex-mlops), [Shielded Folder](./data-solutions/shielded-folder), [BigQuery ML and Vertex AI Pipeline](./data-solutions/bq-ml)
|
- **data solutions** - [GCE and GCS CMEK via centralized Cloud KMS](./data-solutions/cmek-via-centralized-kms), [Cloud Composer version 2 private instance, supporting Shared VPC and external CMEK key](./data-solutions/composer-2), [Cloud SQL instance with multi-region read replicas](./data-solutions/cloudsql-multiregion), [Data Platform](./data-solutions/data-platform-foundations), [Minimal Data Platform](./data-solutions/data-platform-minimal), [Spinning up a foundation data pipeline on Google Cloud using Cloud Storage, Dataflow and BigQuery](./data-solutions/gcs-to-bq-with-least-privileges), [#SQL Server Always On Groups blueprint](./data-solutions/sqlserver-alwayson), [Data Playground](./data-solutions/data-playground), [MLOps with Vertex AI](./data-solutions/vertex-mlops), [Shielded Folder](./data-solutions/shielded-folder), [BigQuery ML and Vertex AI Pipeline](./data-solutions/bq-ml)
|
||||||
- **factories** - [The why and the how of Resource Factories](./factories), [Google Cloud Identity Group Factory](./factories/cloud-identity-group-factory), [Google Cloud BQ Factory](./factories/bigquery-factory), [Google Cloud VPC Firewall Factory](./factories/net-vpc-firewall-yaml), [Minimal Project Factory](./factories/project-factory)
|
- **factories** - [The why and the how of Resource Factories](./factories), [Google Cloud Identity Group Factory](./factories/cloud-identity-group-factory), [Google Cloud BQ Factory](./factories/bigquery-factory), [Google Cloud VPC Firewall Factory](./factories/net-vpc-firewall-yaml), [Minimal Project Factory](./factories/project-factory)
|
||||||
- **GKE** - [Binary Authorization Pipeline Blueprint](./gke/binauthz), [Storage API](./gke/binauthz/image), [Multi-cluster mesh on GKE (fleet API)](./gke/multi-cluster-mesh-gke-fleet-api), [GKE Multitenant Blueprint](./gke/multitenant-fleet), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [GKE Autopilot](./gke/autopilot)
|
- **GKE** - [Binary Authorization Pipeline Blueprint](./gke/binauthz), [Storage API](./gke/binauthz/image), [Multi-cluster mesh on GKE (fleet API)](./gke/multi-cluster-mesh-gke-fleet-api), [GKE Multitenant Blueprint](./gke/multitenant-fleet), [Shared VPC with GKE support](./networking/shared-vpc-gke/), [GKE Autopilot](./gke/autopilot)
|
||||||
- **networking** - [Calling a private Cloud Function from On-premises](./networking/private-cloud-function-from-onprem), [Decentralized firewall management](./networking/decentralized-firewall), [Decentralized firewall validator](./networking/decentralized-firewall/validator), [Network filtering with Squid](./networking/filtering-proxy), [GLB and multi-regional daisy-chaining through hybrid NEGs](./networking/glb-hybrid-neg-internal), [Hybrid connectivity to on-premise services through PSC](./networking/psc-hybrid), [HTTP Load Balancer with Cloud Armor](./networking/glb-and-armor), [Hub and Spoke via VPN](./networking/hub-and-spoke-vpn), [Hub and Spoke via VPC Peering](./networking/hub-and-spoke-peering), [Internal Load Balancer as Next Hop](./networking/ilb-next-hop), [Network filtering with Squid with isolated VPCs using Private Service Connect](./networking/filtering-proxy-psc), On-prem DNS and Google Private Access, [PSC Producer](./networking/psc-hybrid/psc-producer), [PSC Consumer](./networking/psc-hybrid/psc-consumer), [Shared VPC with optional GKE cluster](./networking/shared-vpc-gke)
|
- **networking** - [Calling a private Cloud Function from On-premises](./networking/private-cloud-function-from-onprem), [Decentralized firewall management](./networking/decentralized-firewall), [Decentralized firewall validator](./networking/decentralized-firewall/validator), [Network filtering with Squid](./networking/filtering-proxy), [GLB and multi-regional daisy-chaining through hybrid NEGs](./networking/glb-hybrid-neg-internal), [Hybrid connectivity to on-premise services through PSC](./networking/psc-hybrid), [HTTP Load Balancer with Cloud Armor](./networking/glb-and-armor), [Hub and Spoke via VPN](./networking/hub-and-spoke-vpn), [Hub and Spoke via VPC Peering](./networking/hub-and-spoke-peering), [Internal Load Balancer as Next Hop](./networking/ilb-next-hop), [Network filtering with Squid with isolated VPCs using Private Service Connect](./networking/filtering-proxy-psc), On-prem DNS and Google Private Access, [PSC Producer](./networking/psc-hybrid/psc-producer), [PSC Consumer](./networking/psc-hybrid/psc-consumer), [Shared VPC with optional GKE cluster](./networking/shared-vpc-gke)
|
||||||
|
|
|
@ -443,5 +443,15 @@
|
||||||
"mode": "NULLABLE",
|
"mode": "NULLABLE",
|
||||||
"name": "x_apigee_mintng_price_multiplier",
|
"name": "x_apigee_mintng_price_multiplier",
|
||||||
"type": "NUMERIC"
|
"type": "NUMERIC"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"mode": "NULLABLE",
|
||||||
|
"name": "sense_flag_headers",
|
||||||
|
"type": "STRING"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"mode": "NULLABLE",
|
||||||
|
"name": "sense_action_id",
|
||||||
|
"type": "STRING"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
@ -43,9 +43,7 @@ module "project" {
|
||||||
module.function_gcs2bq.service_account_iam_email
|
module.function_gcs2bq.service_account_iam_email
|
||||||
]
|
]
|
||||||
"roles/logging.logWriter" = [
|
"roles/logging.logWriter" = [
|
||||||
module.function_export.service_account_iam_email
|
module.function_export.service_account_iam_email,
|
||||||
]
|
|
||||||
"roles/logging.logWriter" = [
|
|
||||||
module.function_gcs2bq.service_account_iam_email
|
module.function_gcs2bq.service_account_iam_email
|
||||||
]
|
]
|
||||||
"roles/apigee.admin" = [
|
"roles/apigee.admin" = [
|
||||||
|
@ -182,10 +180,12 @@ module "function_export" {
|
||||||
DATASTORE = var.datastore_name
|
DATASTORE = var.datastore_name
|
||||||
}
|
}
|
||||||
trigger_config = {
|
trigger_config = {
|
||||||
|
v1 = {
|
||||||
event = "google.pubsub.topic.publish"
|
event = "google.pubsub.topic.publish"
|
||||||
resource = module.pubsub_export.id
|
resource = module.pubsub_export.id
|
||||||
retry = null
|
retry = null
|
||||||
}
|
}
|
||||||
|
}
|
||||||
service_account_create = true
|
service_account_create = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -218,10 +218,12 @@ module "function_gcs2bq" {
|
||||||
LOCATION = var.organization.analytics_region
|
LOCATION = var.organization.analytics_region
|
||||||
}
|
}
|
||||||
trigger_config = {
|
trigger_config = {
|
||||||
|
v1 = {
|
||||||
event = "google.pubsub.topic.publish"
|
event = "google.pubsub.topic.publish"
|
||||||
resource = module.bucket_export.topic
|
resource = module.bucket_export.topic
|
||||||
retry = null
|
retry = null
|
||||||
}
|
}
|
||||||
|
}
|
||||||
service_account_create = true
|
service_account_create = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -287,7 +289,7 @@ resource "local_file" "create_datastore_file" {
|
||||||
path = var.path
|
path = var.path
|
||||||
})
|
})
|
||||||
filename = "${path.module}/create-datastore.sh"
|
filename = "${path.module}/create-datastore.sh"
|
||||||
file_permission = "0777"
|
file_permission = "0755"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "local_file" "deploy_apiproxy_file" {
|
resource "local_file" "deploy_apiproxy_file" {
|
||||||
|
@ -295,5 +297,5 @@ resource "local_file" "deploy_apiproxy_file" {
|
||||||
org_name = module.apigee.org_name
|
org_name = module.apigee.org_name
|
||||||
})
|
})
|
||||||
filename = "${path.module}/deploy-apiproxy.sh"
|
filename = "${path.module}/deploy-apiproxy.sh"
|
||||||
file_permission = "0777"
|
file_permission = "0755"
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,7 +35,3 @@ curl -v -X POST \
|
||||||
curl -v -X POST \
|
curl -v -X POST \
|
||||||
-H "Authorization: Bearer $TOKEN" \
|
-H "Authorization: Bearer $TOKEN" \
|
||||||
"https://apigee.googleapis.com/v1/organizations/$ORG_NAME/environments/$ENV_NAME/apis/httpbin/revisions/1/deployments"
|
"https://apigee.googleapis.com/v1/organizations/$ORG_NAME/environments/$ENV_NAME/apis/httpbin/revisions/1/deployments"
|
||||||
|
|
||||||
curl -v \
|
|
||||||
-H "Authorization: Bearer $TOKEN" \
|
|
||||||
"https://apigee.googleapis.com/v1/organizations/$ORG_NAME/environments/$ENV_NAME/apis/httpbin/revisions/1/deployments"
|
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -27,7 +27,7 @@ resource "local_file" "vars_file" {
|
||||||
ingress_ip_name = local.ingress_ip_name
|
ingress_ip_name = local.ingress_ip_name
|
||||||
})
|
})
|
||||||
filename = "${path.module}/ansible/vars/vars.yaml"
|
filename = "${path.module}/ansible/vars/vars.yaml"
|
||||||
file_permission = "0666"
|
file_permission = "0644"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "local_file" "gssh_file" {
|
resource "local_file" "gssh_file" {
|
||||||
|
@ -36,5 +36,5 @@ resource "local_file" "gssh_file" {
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
})
|
})
|
||||||
filename = "${path.module}/ansible/gssh.sh"
|
filename = "${path.module}/ansible/gssh.sh"
|
||||||
file_permission = "0777"
|
file_permission = "0755"
|
||||||
}
|
}
|
||||||
|
|
|
@ -89,5 +89,5 @@ resource "local_file" "deploy_apiproxy_file" {
|
||||||
org = module.project.project_id
|
org = module.project.project_id
|
||||||
})
|
})
|
||||||
filename = "${path.module}/deploy-apiproxy.sh"
|
filename = "${path.module}/deploy-apiproxy.sh"
|
||||||
file_permission = "0777"
|
file_permission = "0755"
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,7 +15,7 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
module "cluster" {
|
module "cluster" {
|
||||||
source = "../../../modules/gke-cluster"
|
source = "../../../modules/gke-cluster-standard"
|
||||||
project_id = module.project.project_id
|
project_id = module.project.project_id
|
||||||
name = "cluster"
|
name = "cluster"
|
||||||
location = var.region
|
location = var.region
|
||||||
|
|
|
@ -6,7 +6,7 @@ The architecture is the one depicted below.
|
||||||
|
|
||||||
![Diagram](diagram.png)
|
![Diagram](diagram.png)
|
||||||
|
|
||||||
To emulate an service deployed on-premise, we have used a managed instance group of instances running Nginx exposed via a regional internalload balancer (L7). The service is accesible through VPN.
|
To emulate an service deployed on-premise, we have used a managed instance group of instances running Nginx exposed via a regional internalload balancer (L7). The service is accessible through VPN.
|
||||||
|
|
||||||
## Running the blueprint
|
## Running the blueprint
|
||||||
|
|
||||||
|
|
|
@ -37,5 +37,5 @@ resource "local_file" "deploy_apiproxy_file" {
|
||||||
environment = local.environment
|
environment = local.environment
|
||||||
})
|
})
|
||||||
filename = "${path.module}/deploy-apiproxy.sh"
|
filename = "${path.module}/deploy-apiproxy.sh"
|
||||||
file_permission = "0777"
|
file_permission = "0755"
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -130,7 +130,7 @@ resource "local_file" "vars_file" {
|
||||||
adfs_dns_domain_name = var.adfs_dns_domain_name
|
adfs_dns_domain_name = var.adfs_dns_domain_name
|
||||||
})
|
})
|
||||||
filename = "${path.module}/ansible/vars/vars.yaml"
|
filename = "${path.module}/ansible/vars/vars.yaml"
|
||||||
file_permission = "0666"
|
file_permission = "0644"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "local_file" "gssh_file" {
|
resource "local_file" "gssh_file" {
|
||||||
|
@ -139,5 +139,5 @@ resource "local_file" "gssh_file" {
|
||||||
project_id = var.project_id
|
project_id = var.project_id
|
||||||
})
|
})
|
||||||
filename = "${path.module}/ansible/gssh.sh"
|
filename = "${path.module}/ansible/gssh.sh"
|
||||||
file_permission = "0777"
|
file_permission = "0755"
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -76,6 +76,7 @@ Refer to the [Cloud Function deployment instructions](./deploy-cloud-function/)
|
||||||
- The tool assumes global routing is ON, this impacts dynamic routes usage calculation.
|
- The tool assumes global routing is ON, this impacts dynamic routes usage calculation.
|
||||||
- The tool assumes custom routes importing/exporting is ON, this impacts static and dynamic routes usage calculation.
|
- The tool assumes custom routes importing/exporting is ON, this impacts static and dynamic routes usage calculation.
|
||||||
- The tool assumes all networks in peering groups have the same global routing and custom routes sharing configuration.
|
- The tool assumes all networks in peering groups have the same global routing and custom routes sharing configuration.
|
||||||
|
- The tool assumes both Shared VPCs service projects and host projects to be in scope.
|
||||||
|
|
||||||
## TODO
|
## TODO
|
||||||
|
|
||||||
|
|
|
@ -14,7 +14,7 @@ A few configuration values for the function which are relevant to this example c
|
||||||
|
|
||||||
## Discovery configuration
|
## Discovery configuration
|
||||||
|
|
||||||
Discovery configuration is done via the `discovery_config` variable, which mimicks the set of options available when running the discovery tool in cli mode. Pay particular care in defining the right top-level scope via the `discovery_root` attribute, as this is the root of the hierarchy used to discover Compute resources and it needs to include the individual folders and projects that needs to be monitored, which are defined via the `monitored_folders` and `monitored_projects` attributes.
|
Discovery configuration is done via the `discovery_config` variable, which mimics the set of options available when running the discovery tool in cli mode. Pay particular care in defining the right top-level scope via the `discovery_root` attribute, as this is the root of the hierarchy used to discover Compute resources and it needs to include the individual folders and projects that needs to be monitored, which are defined via the `monitored_folders` and `monitored_projects` attributes.
|
||||||
|
|
||||||
The following schematic diagram of a resource hierarchy illustrates the interplay between root scope and monitored resources. The root scope is set to the top-level red folder and completely encloses every resource that needs to be monitored. The blue folder and project are set as monitored defining the actual perimeter used to discover resources. Note that setting the root scope to the blue folder would have resulted in the rightmost project being excluded.
|
The following schematic diagram of a resource hierarchy illustrates the interplay between root scope and monitored resources. The root scope is set to the top-level red folder and completely encloses every resource that needs to be monitored. The blue folder and project are set as monitored defining the actual perimeter used to discover resources. Note that setting the root scope to the blue folder would have resulted in the rightmost project being excluded.
|
||||||
|
|
||||||
|
@ -64,17 +64,18 @@ dashboard_json_path = "../dashboards/quotas-utilization.json"
|
||||||
|
|
||||||
| name | description | type | required | default |
|
| name | description | type | required | default |
|
||||||
|---|---|:---:|:---:|:---:|
|
|---|---|:---:|:---:|:---:|
|
||||||
| [discovery_config](variables.tf#L44) | Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empy, every project under the discovery root node will be monitored. | <code title="object({ discovery_root = string monitored_folders = list(string) monitored_projects = list(string) custom_quota_file = optional(string) })">object({…})</code> | ✓ | |
|
| [discovery_config](variables.tf#L48) | Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empty, every project under the discovery root node will be monitored. | <code title="object({ discovery_root = string monitored_folders = list(string) monitored_projects = list(string) custom_quota_file = optional(string) })">object({…})</code> | ✓ | |
|
||||||
| [project_id](variables.tf#L90) | Project id where the Cloud Function will be deployed. | <code>string</code> | ✓ | |
|
| [project_id](variables.tf#L100) | Project id where the Cloud Function will be deployed. | <code>string</code> | ✓ | |
|
||||||
| [bundle_path](variables.tf#L17) | Path used to write the intermediate Cloud Function code bundle. | <code>string</code> | | <code>"./bundle.zip"</code> |
|
| [bundle_path](variables.tf#L17) | Path used to write the intermediate Cloud Function code bundle. | <code>string</code> | | <code>"./bundle.zip"</code> |
|
||||||
| [cloud_function_config](variables.tf#L23) | Optional Cloud Function configuration. | <code title="object({ bucket_name = optional(string) build_worker_pool_id = optional(string) bundle_path = optional(string, "./bundle.zip") debug = optional(bool, false) memory_mb = optional(number, 256) source_dir = optional(string, "../src") timeout_seconds = optional(number, 540) })">object({…})</code> | | <code>{}</code> |
|
| [cloud_function_config](variables.tf#L23) | Optional Cloud Function configuration. | <code title="object({ bucket_name = optional(string) build_worker_pool_id = optional(string) bundle_path = optional(string, "./bundle.zip") debug = optional(bool, false) memory_mb = optional(number, 256) source_dir = optional(string, "../src") timeout_seconds = optional(number, 540) vpc_connector = optional(object({ name = string egress_settings = optional(string, "ALL_TRAFFIC") })) })">object({…})</code> | | <code>{}</code> |
|
||||||
| [dashboard_json_path](variables.tf#L38) | Optional monitoring dashboard to deploy. | <code>string</code> | | <code>null</code> |
|
| [dashboard_json_path](variables.tf#L42) | Optional monitoring dashboard to deploy. | <code>string</code> | | <code>null</code> |
|
||||||
| [grant_discovery_iam_roles](variables.tf#L62) | Optionally grant required IAM roles to Cloud Function service account. | <code>bool</code> | | <code>false</code> |
|
| [grant_discovery_iam_roles](variables.tf#L66) | Optionally grant required IAM roles to Cloud Function service account. | <code>bool</code> | | <code>false</code> |
|
||||||
| [labels](variables.tf#L69) | Billing labels used for the Cloud Function, and the project if project_create is true. | <code>map(string)</code> | | <code>{}</code> |
|
| [labels](variables.tf#L73) | Billing labels used for the Cloud Function, and the project if project_create is true. | <code>map(string)</code> | | <code>{}</code> |
|
||||||
| [name](variables.tf#L75) | Name used to create Cloud Function related resources. | <code>string</code> | | <code>"net-dash"</code> |
|
| [monitoring_project](variables.tf#L79) | Project where generated metrics will be written. Default is to use the same project where the Cloud Function is deployed. | <code>string</code> | | <code>null</code> |
|
||||||
| [project_create_config](variables.tf#L81) | Optional configuration if project creation is required. | <code title="object({ billing_account_id = string parent_id = optional(string) })">object({…})</code> | | <code>null</code> |
|
| [name](variables.tf#L85) | Name used to create Cloud Function related resources. | <code>string</code> | | <code>"net-dash"</code> |
|
||||||
| [region](variables.tf#L95) | Compute region where the Cloud Function will be deployed. | <code>string</code> | | <code>"europe-west1"</code> |
|
| [project_create_config](variables.tf#L91) | Optional configuration if project creation is required. | <code title="object({ billing_account_id = string parent_id = optional(string) })">object({…})</code> | | <code>null</code> |
|
||||||
| [schedule_config](variables.tf#L101) | Schedule timer configuration in crontab format. | <code>string</code> | | <code>"*/30 * * * *"</code> |
|
| [region](variables.tf#L105) | Compute region where the Cloud Function will be deployed. | <code>string</code> | | <code>"europe-west1"</code> |
|
||||||
|
| [schedule_config](variables.tf#L111) | Schedule timer configuration in crontab format. | <code>string</code> | | <code>"*/30 * * * *"</code> |
|
||||||
|
|
||||||
## Outputs
|
## Outputs
|
||||||
|
|
||||||
|
|
|
@ -81,6 +81,15 @@ module "cloud-function" {
|
||||||
resource = module.pubsub.topic.id
|
resource = module.pubsub.topic.id
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
vpc_connector = (
|
||||||
|
var.cloud_function_config.vpc_connector == null
|
||||||
|
? null
|
||||||
|
: {
|
||||||
|
create = false
|
||||||
|
name = var.cloud_function_config.vpc_connector.name
|
||||||
|
egress_settings = var.cloud_function_config.vpc_connector.egress_settings
|
||||||
|
}
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "google_cloud_scheduler_job" "default" {
|
resource "google_cloud_scheduler_job" "default" {
|
||||||
|
@ -97,7 +106,11 @@ resource "google_cloud_scheduler_job" "default" {
|
||||||
discovery_root = var.discovery_config.discovery_root
|
discovery_root = var.discovery_config.discovery_root
|
||||||
folders = var.discovery_config.monitored_folders
|
folders = var.discovery_config.monitored_folders
|
||||||
projects = var.discovery_config.monitored_projects
|
projects = var.discovery_config.monitored_projects
|
||||||
monitoring_project = module.project.project_id
|
monitoring_project = (
|
||||||
|
var.monitoring_project == null
|
||||||
|
? module.project.project_id
|
||||||
|
: var.monitoring_project
|
||||||
|
)
|
||||||
custom_quota = (
|
custom_quota = (
|
||||||
var.discovery_config.custom_quota_file == null
|
var.discovery_config.custom_quota_file == null
|
||||||
? { networks = {}, projects = {} }
|
? { networks = {}, projects = {} }
|
||||||
|
|
|
@ -30,6 +30,10 @@ variable "cloud_function_config" {
|
||||||
memory_mb = optional(number, 256)
|
memory_mb = optional(number, 256)
|
||||||
source_dir = optional(string, "../src")
|
source_dir = optional(string, "../src")
|
||||||
timeout_seconds = optional(number, 540)
|
timeout_seconds = optional(number, 540)
|
||||||
|
vpc_connector = optional(object({
|
||||||
|
name = string
|
||||||
|
egress_settings = optional(string, "ALL_TRAFFIC")
|
||||||
|
}))
|
||||||
})
|
})
|
||||||
default = {}
|
default = {}
|
||||||
nullable = false
|
nullable = false
|
||||||
|
@ -42,7 +46,7 @@ variable "dashboard_json_path" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "discovery_config" {
|
variable "discovery_config" {
|
||||||
description = "Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empy, every project under the discovery root node will be monitored."
|
description = "Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empty, every project under the discovery root node will be monitored."
|
||||||
type = object({
|
type = object({
|
||||||
discovery_root = string
|
discovery_root = string
|
||||||
monitored_folders = list(string)
|
monitored_folders = list(string)
|
||||||
|
@ -72,6 +76,12 @@ variable "labels" {
|
||||||
default = {}
|
default = {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "monitoring_project" {
|
||||||
|
description = "Project where generated metrics will be written. Default is to use the same project where the Cloud Function is deployed."
|
||||||
|
type = string
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
variable "name" {
|
variable "name" {
|
||||||
description = "Name used to create Cloud Function related resources."
|
description = "Name used to create Cloud Function related resources."
|
||||||
type = string
|
type = string
|
||||||
|
|
|
@ -2,9 +2,10 @@
|
||||||
|
|
||||||
This tool constitutes the discovery and data gathering side of the Network Dashboard, and can be used in combination with the related [Terraform deployment examples](../), or packaged in different ways including standalone manual use.
|
This tool constitutes the discovery and data gathering side of the Network Dashboard, and can be used in combination with the related [Terraform deployment examples](../), or packaged in different ways including standalone manual use.
|
||||||
|
|
||||||
- [Quick Usage Example](#quick-usage-example)
|
- [Network Dashboard Discovery Tool](#network-dashboard-discovery-tool)
|
||||||
- [High Level Architecture and Plugin Design](#high-level-architecture-and-plugin-design)
|
- [Quick Usage Example](#quick-usage-example)
|
||||||
- [Debugging and Troubleshooting](#debugging-and-troubleshooting)
|
- [High Level Architecture and Plugin Design](#high-level-architecture-and-plugin-design)
|
||||||
|
- [Debugging and Troubleshooting](#debugging-and-troubleshooting)
|
||||||
|
|
||||||
## Quick Usage Example
|
## Quick Usage Example
|
||||||
|
|
||||||
|
@ -48,7 +49,7 @@ A typical invocation might look like this:
|
||||||
```bash
|
```bash
|
||||||
./main.py \
|
./main.py \
|
||||||
-dr organizations/1234567890 \
|
-dr organizations/1234567890 \
|
||||||
-op my-monitoring-project \
|
-mon my-monitoring-project \
|
||||||
--folder 1234567890 --folder 987654321 \
|
--folder 1234567890 --folder 987654321 \
|
||||||
--project my-net-project \
|
--project my-net-project \
|
||||||
--custom-quota-file custom-quotas.yaml
|
--custom-quota-file custom-quotas.yaml
|
||||||
|
@ -75,7 +76,7 @@ The main module cycles through stages, calling stage plugins in succession itera
|
||||||
|
|
||||||
## Debugging and Troubleshooting
|
## Debugging and Troubleshooting
|
||||||
|
|
||||||
Note that python version > 3.8 is required.
|
Note that python version >= 3.10 is required.
|
||||||
|
|
||||||
If you run into a `ModuleNotFoundError`, install the required dependencies:
|
If you run into a `ModuleNotFoundError`, install the required dependencies:
|
||||||
`pip3 install -r requirements.txt`
|
`pip3 install -r requirements.txt`
|
||||||
|
@ -90,7 +91,7 @@ This is an example call that stores discovery results to a file:
|
||||||
```bash
|
```bash
|
||||||
./main.py \
|
./main.py \
|
||||||
-dr organizations/1234567890 \
|
-dr organizations/1234567890 \
|
||||||
-op my-monitoring-project \
|
-mon my-monitoring-project \
|
||||||
--folder 1234567890 --folder 987654321 \
|
--folder 1234567890 --folder 987654321 \
|
||||||
--project my-net-project \
|
--project my-net-project \
|
||||||
--custom-quota-file custom-quotas.yaml \
|
--custom-quota-file custom-quotas.yaml \
|
||||||
|
@ -102,7 +103,7 @@ And this is the corresponding call that skips the discovery phase and also runs
|
||||||
```bash
|
```bash
|
||||||
./main.py \
|
./main.py \
|
||||||
-dr organizations/1234567890 \
|
-dr organizations/1234567890 \
|
||||||
-op my-monitoring-project \
|
-mon my-monitoring-project \
|
||||||
--folder 1234567890 --folder 987654321 \
|
--folder 1234567890 --folder 987654321 \
|
||||||
--project my-net-project \
|
--project my-net-project \
|
||||||
--custom-quota-file custom-quotas.yaml \
|
--custom-quota-file custom-quotas.yaml \
|
||||||
|
|
|
@ -80,8 +80,9 @@ def do_discovery(resources):
|
||||||
resources[result.type][result.id][result.key] = result.data
|
resources[result.type][result.id][result.key] = result.data
|
||||||
else:
|
else:
|
||||||
resources[result.type][result.id] = result.data
|
resources[result.type][result.id] = result.data
|
||||||
LOGGER.info('discovery end {}'.format(
|
LOGGER.info('discovery end {}'.format({
|
||||||
{k: len(v) for k, v in resources.items() if not isinstance(v, str)}))
|
k: len(v) for k, v in resources.items() if not isinstance(v, str)
|
||||||
|
}))
|
||||||
|
|
||||||
|
|
||||||
def do_init(resources, discovery_root, monitoring_project, folders=None,
|
def do_init(resources, discovery_root, monitoring_project, folders=None,
|
||||||
|
|
|
@ -33,7 +33,8 @@ def init(resources):
|
||||||
def start_discovery(resources, response=None):
|
def start_discovery(resources, response=None):
|
||||||
'Plugin entry point, group and return discovered networks.'
|
'Plugin entry point, group and return discovered networks.'
|
||||||
LOGGER.info(f'discovery (has response: {response is not None})')
|
LOGGER.info(f'discovery (has response: {response is not None})')
|
||||||
grouped = itertools.groupby(resources['networks'].values(),
|
grouped = itertools.groupby(
|
||||||
lambda v: v['project_id'])
|
sorted(resources['networks'].values(), key=lambda i: i['project_id']),
|
||||||
|
lambda i: i['project_id'])
|
||||||
for project_id, vpcs in grouped:
|
for project_id, vpcs in grouped:
|
||||||
yield Resource(NAME, project_id, [v['self_link'] for v in vpcs])
|
yield Resource(NAME, project_id, [v['self_link'] for v in vpcs])
|
||||||
|
|
|
@ -38,8 +38,9 @@ def timeseries(resources):
|
||||||
yield MetricDescriptor(f'project/{dtype}', name, ('project',),
|
yield MetricDescriptor(f'project/{dtype}', name, ('project',),
|
||||||
dtype.endswith('ratio'))
|
dtype.endswith('ratio'))
|
||||||
# group firewall rules by network then prepare and return timeseries
|
# group firewall rules by network then prepare and return timeseries
|
||||||
grouped = itertools.groupby(resources['firewall_rules'].values(),
|
grouped = itertools.groupby(
|
||||||
lambda v: v['network'])
|
sorted(resources['firewall_rules'].values(), key=lambda i: i['network']),
|
||||||
|
lambda i: i['network'])
|
||||||
for network_id, rules in grouped:
|
for network_id, rules in grouped:
|
||||||
count = len(list(rules))
|
count = len(list(rules))
|
||||||
labels = {
|
labels = {
|
||||||
|
@ -48,8 +49,9 @@ def timeseries(resources):
|
||||||
}
|
}
|
||||||
yield TimeSeries('network/firewall_rules_used', count, labels)
|
yield TimeSeries('network/firewall_rules_used', count, labels)
|
||||||
# group firewall rules by project then prepare and return timeseries
|
# group firewall rules by project then prepare and return timeseries
|
||||||
grouped = itertools.groupby(resources['firewall_rules'].values(),
|
grouped = itertools.groupby(
|
||||||
lambda v: v['project_id'])
|
sorted(resources['firewall_rules'].values(),
|
||||||
|
key=lambda i: i['project_id']), lambda i: i['project_id'])
|
||||||
for project_id, rules in grouped:
|
for project_id, rules in grouped:
|
||||||
count = len(list(rules))
|
count = len(list(rules))
|
||||||
limit = int(resources['quota'][project_id]['global']['FIREWALLS'])
|
limit = int(resources['quota'][project_id]['global']['FIREWALLS'])
|
||||||
|
|
|
@ -81,8 +81,12 @@ def _forwarding_rules(resources):
|
||||||
forwarding_rules_l7 = itertools.filterfalse(
|
forwarding_rules_l7 = itertools.filterfalse(
|
||||||
functools.partial(filter, 'INTERNAL_MANAGED'), forwarding_rules)
|
functools.partial(filter, 'INTERNAL_MANAGED'), forwarding_rules)
|
||||||
# group each iterator by network and return timeseries
|
# group each iterator by network and return timeseries
|
||||||
grouped_l4 = itertools.groupby(forwarding_rules_l4, lambda i: i['network'])
|
grouped_l4 = itertools.groupby(
|
||||||
grouped_l7 = itertools.groupby(forwarding_rules_l7, lambda i: i['network'])
|
sorted(forwarding_rules_l4, key=lambda i: i['network']),
|
||||||
|
lambda i: i['network'])
|
||||||
|
grouped_l7 = itertools.groupby(
|
||||||
|
sorted(forwarding_rules_l7, key=lambda i: i['network']),
|
||||||
|
lambda i: i['network'])
|
||||||
return itertools.chain(
|
return itertools.chain(
|
||||||
_group_timeseries('forwarding_rules_l4', resources, grouped_l4,
|
_group_timeseries('forwarding_rules_l4', resources, grouped_l4,
|
||||||
'INTERNAL_FORWARDING_RULES_PER_NETWORK'),
|
'INTERNAL_FORWARDING_RULES_PER_NETWORK'),
|
||||||
|
@ -95,7 +99,9 @@ def _instances(resources):
|
||||||
'Groups instances by network and returns relevant timeseries.'
|
'Groups instances by network and returns relevant timeseries.'
|
||||||
instance_networks = itertools.chain.from_iterable(
|
instance_networks = itertools.chain.from_iterable(
|
||||||
i['networks'] for i in resources['instances'].values())
|
i['networks'] for i in resources['instances'].values())
|
||||||
grouped = itertools.groupby(instance_networks, lambda i: i['network'])
|
grouped = itertools.groupby(
|
||||||
|
sorted(instance_networks, key=lambda i: i['network']),
|
||||||
|
lambda i: i['network'])
|
||||||
return _group_timeseries('instances', resources, grouped,
|
return _group_timeseries('instances', resources, grouped,
|
||||||
'INSTANCES_PER_NETWORK_GLOBAL')
|
'INSTANCES_PER_NETWORK_GLOBAL')
|
||||||
|
|
||||||
|
@ -120,8 +126,9 @@ def _peerings(resources):
|
||||||
|
|
||||||
def _subnet_ranges(resources):
|
def _subnet_ranges(resources):
|
||||||
'Groups subnetworks by network and returns relevant timeseries.'
|
'Groups subnetworks by network and returns relevant timeseries.'
|
||||||
grouped = itertools.groupby(resources['subnetworks'].values(),
|
grouped = itertools.groupby(
|
||||||
lambda v: v['network'])
|
sorted(resources['subnetworks'].values(), key=lambda i: i['network']),
|
||||||
|
lambda i: i['network'])
|
||||||
return _group_timeseries('subnets', resources, grouped,
|
return _group_timeseries('subnets', resources, grouped,
|
||||||
'SUBNET_RANGES_PER_NETWORK')
|
'SUBNET_RANGES_PER_NETWORK')
|
||||||
|
|
||||||
|
|
|
@ -60,7 +60,8 @@ def _static(resources):
|
||||||
'Computes network and project-level timeseries for dynamic routes.'
|
'Computes network and project-level timeseries for dynamic routes.'
|
||||||
filter = lambda v: v['next_hop_type'] in ('peering', 'network')
|
filter = lambda v: v['next_hop_type'] in ('peering', 'network')
|
||||||
routes = itertools.filterfalse(filter, resources['routes'].values())
|
routes = itertools.filterfalse(filter, resources['routes'].values())
|
||||||
grouped = itertools.groupby(routes, lambda v: v['network'])
|
grouped = itertools.groupby(sorted(routes, key=lambda i: i['network']),
|
||||||
|
lambda i: i['network'])
|
||||||
project_counts = {}
|
project_counts = {}
|
||||||
for network_id, elements in grouped:
|
for network_id, elements in grouped:
|
||||||
network = resources['networks'].get(network_id)
|
network = resources['networks'].get(network_id)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
click==8.1.3
|
click==8.1.3
|
||||||
google-auth==2.14.1
|
google-auth==2.14.1
|
||||||
PyYAML==6.0
|
PyYAML==6.0
|
||||||
requests==2.28.1
|
requests==2.31.0
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,7 +24,7 @@ Building Compute Engine image (Packer part):
|
||||||
|
|
||||||
## Using Packer's service account
|
## Using Packer's service account
|
||||||
|
|
||||||
The following blueprint leverages [service account impersonation](https://cloud.google.com/iam/docs/impersonating-service-accounts)
|
The following blueprint leverages [service account impersonation](https://cloud.google.com/iam/docs/service-account-overview#impersonation)
|
||||||
to execute any operations on GCP as a dedicated Packer service account. Depending on how you execute
|
to execute any operations on GCP as a dedicated Packer service account. Depending on how you execute
|
||||||
the Packer tool, you need to grant your principal rights to impersonate Packer's service account.
|
the Packer tool, you need to grant your principal rights to impersonate Packer's service account.
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,7 @@ The image is provisioned with a sample shell scripts to update OS packages and i
|
||||||
|
|
||||||
The example uses following GCP features:
|
The example uses following GCP features:
|
||||||
|
|
||||||
* [service account impersonation](https://cloud.google.com/iam/docs/impersonating-service-accounts)
|
* [service account impersonation](https://cloud.google.com/iam/docs/service-account-overview#impersonation)
|
||||||
* [Identity-Aware Proxy](https://cloud.google.com/iap/docs/using-tcp-forwarding) tunnel
|
* [Identity-Aware Proxy](https://cloud.google.com/iap/docs/using-tcp-forwarding) tunnel
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,7 +28,7 @@ Labels are set with project id (which may differ from the monitoring workspace p
|
||||||
|
|
||||||
<img src="explorer.png" width="640px" alt="GCP Metrics Explorer, usage, limit and utilization view sample">
|
<img src="explorer.png" width="640px" alt="GCP Metrics Explorer, usage, limit and utilization view sample">
|
||||||
|
|
||||||
The solution can also create a basic monitoring alert policy, to demonstrate how to raise alerts when quotas utilization goes over a predefined threshold, to enable it, set variable `alert_create` to true and reapply main.tf after main.py has run at least one and quota monitoring metrics have been creaed.
|
The solution can also create a basic monitoring alert policy, to demonstrate how to raise alerts when quotas utilization goes over a predefined threshold, to enable it, set variable `alert_create` to true and reapply main.tf after main.py has run at least one and quota monitoring metrics have been created.
|
||||||
|
|
||||||
## Running the blueprint
|
## Running the blueprint
|
||||||
|
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,13 +36,13 @@ Once done testing, you can clean up resources by running `terraform destroy`. To
|
||||||
|
|
||||||
Once resources are created, you can run queries on the data you exported on Bigquery. [Here](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery#querying_an_asset_snapshot) you can find some blueprint of queries you can run.
|
Once resources are created, you can run queries on the data you exported on Bigquery. [Here](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery#querying_an_asset_snapshot) you can find some blueprint of queries you can run.
|
||||||
|
|
||||||
You can also create a dashboard connecting [Datalab](https://datastudio.google.com/) or any other BI tools of your choice to your Bigquery dataset.
|
You can also create a dashboard connecting [Looker Studio](https://lookerstudio.google.com/) or any other BI tools of your choice to your Bigquery dataset.
|
||||||
|
|
||||||
## File exporter for JSON, CSV (optional).
|
## File exporter for JSON, CSV (optional).
|
||||||
|
|
||||||
This is an optional part.
|
This is an optional part.
|
||||||
|
|
||||||
Regular file-based exports of data from Cloud Asset Inventory may be useful for e.g. scale-out network dependencies discovery tools like [Planet Exporter](https://github.com/williamchanrico/planet-exporter), or to update legacy workloads tracking or configuration management systems. Bigquery supports multiple [export formats](https://cloud.google.com/bigquery/docs/exporting-data#export_formats_and_compression_types) and one may upload objects to Storage Bucket using provided Cloud Function. Specify `job.DestinationFormat` as defined in [documentation](https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.job.DestinationFormat.html), e.g. `NEWLINE_DELIMITED_JSON`.
|
Regular file-based exports of data from Cloud Asset Inventory may be useful for e.g. scale-out network dependencies discovery tools like [Planet Exporter](https://github.com/williamchanrico/planet-exporter), or to update legacy workloads tracking or configuration management systems. Bigquery supports multiple [export formats](https://cloud.google.com/bigquery/docs/exporting-data#export_formats_and_compression_types) and one may upload objects to Storage Bucket using provided Cloud Function. Specify `job.DestinationFormat` as defined in [documentation](https://cloud.google.com/python/docs/reference/bigquery/latest/google.cloud.bigquery.job.DestinationFormat), e.g. `NEWLINE_DELIMITED_JSON`.
|
||||||
|
|
||||||
It helps to create custom [scheduled query](https://cloud.google.com/bigquery/docs/scheduling-queries#console) from CAI export tables, and to write out results in to dedicated table (with overwrites). Define such query's output columns to comply with downstream systems' fields requirements, and time query execution after CAI export into BQ for freshness. See [sample queries](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery-sample-queries).
|
It helps to create custom [scheduled query](https://cloud.google.com/bigquery/docs/scheduling-queries#console) from CAI export tables, and to write out results in to dedicated table (with overwrites). Define such query's output columns to comply with downstream systems' fields requirements, and time query execution after CAI export into BQ for freshness. See [sample queries](https://cloud.google.com/asset-inventory/docs/exporting-to-bigquery-sample-queries).
|
||||||
|
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -81,7 +81,7 @@ module "sa-tfc" {
|
||||||
|
|
||||||
iam = {
|
iam = {
|
||||||
# We allow only tokens generated by a specific TFC workspace impersonation of the service account,
|
# We allow only tokens generated by a specific TFC workspace impersonation of the service account,
|
||||||
# that way one identity pool can be used for a TFC Organization, but every workspace will be able to impersonate only a specifc SA
|
# that way one identity pool can be used for a TFC Organization, but every workspace will be able to impersonate only a specific SA
|
||||||
"roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.tfc-pool.name}/attribute.terraform_workspace_id/${var.tfc_workspace_id}"]
|
"roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.tfc-pool.name}/attribute.terraform_workspace_id/${var.tfc_workspace_id}"]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -41,7 +41,7 @@ func getEnv(key, fallback string) string {
|
||||||
return fallback
|
return fallback
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetConfiguration generates configration by reading ENV variables.
|
// GetConfiguration generates configuration by reading ENV variables.
|
||||||
func GetConfiguration() (*Configuration, error) {
|
func GetConfiguration() (*Configuration, error) {
|
||||||
timeout, err := time.ParseDuration(getEnv("TIMEOUT", "1000ms"))
|
timeout, err := time.ParseDuration(getEnv("TIMEOUT", "1000ms"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
# M4CE(v5) - ESXi Connector
|
# M4CE(v5) - ESXi Connector
|
||||||
|
|
||||||
This blueprint deploys a virtual machine from an OVA image and the security prerequisites to run the Migrate for Compute Engine (v5) [connector](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/migrate-connector) on VMWare ESXi.
|
This blueprint deploys a virtual machine from an OVA image and the security prerequisites to run the Migrate for Compute Engine (v5) [connector](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/migrate-connector) on VMWare ESXi.
|
||||||
|
|
||||||
The blueprint is designed to deploy the M4CE (v5) connector on and existing VMWare environment. The [network configuration](https://cloud.google.com/migrate/compute-engine/docs/5.0/concepts/architecture#migration_architecture) required to allow the communication of the migrate connector to the GCP API is not included in this blueprint.
|
The blueprint is designed to deploy the M4CE (v5) connector on and existing VMWare environment. The [network configuration](https://cloud.google.com/migrate/compute-engine/docs/5.0/concepts/architecture#migration_architecture) required to allow the communication of the migrate connector to the GCP API is not included in this blueprint.
|
||||||
|
|
||||||
|
@ -13,9 +13,9 @@ This is the high level diagram:
|
||||||
This sample creates several distinct groups of resources:
|
This sample creates several distinct groups of resources:
|
||||||
|
|
||||||
- virtual machine
|
- virtual machine
|
||||||
- [M4CE migrate connector](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/migrate-connector#installing_the_migrate_connector)
|
- [M4CE migrate connector](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/migrate-connector#step-InstallMigrateConnector)
|
||||||
- IAM
|
- IAM
|
||||||
- [vCenter user role](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/migrate-connector#step-1)
|
- [vCenter user role](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/migrate-connector#step-1)
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
|
|
@ -16,9 +16,9 @@ This sample creates\updates several distinct groups of resources:
|
||||||
- Deploy M4CE host project with [required services](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#enabling_required_services_on_the_host_project) on a new or existing project.
|
- Deploy M4CE host project with [required services](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#enabling_required_services_on_the_host_project) on a new or existing project.
|
||||||
- M4CE target project prerequisites deployed on existing projects.
|
- M4CE target project prerequisites deployed on existing projects.
|
||||||
- IAM
|
- IAM
|
||||||
- Create a [service account](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/migrate-connector#step-3) used at runtime by the M4CE connector for data replication
|
- Create a [service account](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/migrate-connector#step-3) used at runtime by the M4CE connector for data replication
|
||||||
- Grant [migration admin roles](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts
|
- Grant [migration admin roles](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts
|
||||||
- Grant [migration viewer role](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts
|
- Grant [migration viewer role](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
@ -27,7 +27,7 @@ This sample creates\updates several distinct groups of resources:
|
||||||
|---|---|:---:|:---:|:---:|
|
|---|---|:---:|:---:|:---:|
|
||||||
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
||||||
| [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | <code>list(string)</code> | ✓ | |
|
| [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | <code>list(string)</code> | ✓ | |
|
||||||
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||||
| [project_create](variables.tf#L31) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
| [project_create](variables.tf#L31) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
||||||
| [project_name](variables.tf#L40) | Name of an existing project or of the new project assigned as M4CE host project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
| [project_name](variables.tf#L40) | Name of an existing project or of the new project assigned as M4CE host project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,7 @@ variable "migration_target_projects" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "migration_viewer_users" {
|
variable "migration_viewer_users" {
|
||||||
description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format."
|
description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format."
|
||||||
type = list(string)
|
type = list(string)
|
||||||
default = []
|
default = []
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,10 +16,10 @@ This sample creates\update several distinct groups of resources:
|
||||||
- M4CE host project with [required services](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#enabling_required_services_on_the_host_project) deployed on a new or existing project.
|
- M4CE host project with [required services](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#enabling_required_services_on_the_host_project) deployed on a new or existing project.
|
||||||
- M4CE target project prerequisites deployed on existing projects.
|
- M4CE target project prerequisites deployed on existing projects.
|
||||||
- IAM
|
- IAM
|
||||||
- Create a [service account](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/migrate-connector#step-3) used at runtime by the M4CE connector for data replication
|
- Create a [service account](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/migrate-connector#step-3) used at runtime by the M4CE connector for data replication
|
||||||
- Grant [migration admin roles](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts.
|
- Grant [migration admin roles](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts.
|
||||||
- Grant [migration viewer role](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts.
|
- Grant [migration viewer role](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/enable-services#using_predefined_roles) to provided user accounts.
|
||||||
- Grant [roles on shared VPC](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/target-project#configure-permissions) to migration admins
|
- Grant [roles on shared VPC](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/target-project#configure-permissions) to migration admins
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
@ -29,7 +29,7 @@ This sample creates\update several distinct groups of resources:
|
||||||
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
||||||
| [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | <code>list(string)</code> | ✓ | |
|
| [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | <code>list(string)</code> | ✓ | |
|
||||||
| [sharedvpc_host_projects](variables.tf#L45) | List of host projects that share a VPC with the selected target projects. | <code>list(string)</code> | ✓ | |
|
| [sharedvpc_host_projects](variables.tf#L45) | List of host projects that share a VPC with the selected target projects. | <code>list(string)</code> | ✓ | |
|
||||||
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||||
| [project_create](variables.tf#L30) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
| [project_create](variables.tf#L30) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
||||||
| [project_name](variables.tf#L39) | Name of an existing project or of the new project assigned as M4CE host project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
| [project_name](variables.tf#L39) | Name of an existing project or of the new project assigned as M4CE host project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
||||||
|
|
||||||
|
@ -41,7 +41,7 @@ This sample creates\update several distinct groups of resources:
|
||||||
|
|
||||||
<!-- END TFDOC -->
|
<!-- END TFDOC -->
|
||||||
## Manual Steps
|
## Manual Steps
|
||||||
Once this blueprint is deployed the M4CE [m4ce_gmanaged_service_account](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/target-sa-compute-engine#configuring_the_default_service_account) has to be configured to grant the access to the shared VPC and allow the deploy of Compute Engine instances as the result of the migration.
|
Once this blueprint is deployed the M4CE [m4ce_gmanaged_service_account](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/target-sa-compute-engine#configuring_the_default_service_account) has to be configured to grant the access to the shared VPC and allow the deploy of Compute Engine instances as the result of the migration.
|
||||||
|
|
||||||
## Test
|
## Test
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,7 @@ variable "migration_target_projects" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "migration_viewer_users" {
|
variable "migration_viewer_users" {
|
||||||
description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format."
|
description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format."
|
||||||
type = list(string)
|
type = list(string)
|
||||||
default = []
|
default = []
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,9 +17,9 @@ This sample creates several distinct groups of resources:
|
||||||
- networking
|
- networking
|
||||||
- Default VPC network
|
- Default VPC network
|
||||||
- IAM
|
- IAM
|
||||||
- One [service account](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/migrate-connector#step-3) used at runtime by the M4CE connector for data replication
|
- One [service account](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/migrate-connector#step-3) used at runtime by the M4CE connector for data replication
|
||||||
- Grant [migration admin roles](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#using_predefined_roles) to admin user accounts
|
- Grant [migration admin roles](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/enable-services#using_predefined_roles) to admin user accounts
|
||||||
- Grant [migration viewer role](https://cloud.google.com/migrate/compute-engine/docs/5.0/how-to/enable-services#using_predefined_roles) to viewer user accounts
|
- Grant [migration viewer role](https://cloud.google.com/migrate/virtual-machines/docs/5.0/how-to/enable-services#using_predefined_roles) to viewer user accounts
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
@ -27,7 +27,7 @@ This sample creates several distinct groups of resources:
|
||||||
| name | description | type | required | default |
|
| name | description | type | required | default |
|
||||||
|---|---|:---:|:---:|:---:|
|
|---|---|:---:|:---:|:---:|
|
||||||
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
| [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | <code>list(string)</code> | ✓ | |
|
||||||
| [migration_viewer_users](variables.tf#L20) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
| [migration_viewer_users](variables.tf#L20) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | <code>list(string)</code> | | <code>[]</code> |
|
||||||
| [project_create](variables.tf#L26) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
| [project_create](variables.tf#L26) | Parameters for the creation of the new project to host the M4CE backend. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
||||||
| [project_name](variables.tf#L35) | Name of an existing project or of the new project assigned as M4CE host an target project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
| [project_name](variables.tf#L35) | Name of an existing project or of the new project assigned as M4CE host an target project. | <code>string</code> | | <code>"m4ce-host-project-000"</code> |
|
||||||
| [vpc_config](variables.tf#L41) | Parameters to create a simple VPC on the M4CE project. | <code title="object({ ip_cidr_range = string, region = string })">object({…})</code> | | <code title="{ ip_cidr_range = "10.200.0.0/20", region = "us-west2" }">{…}</code> |
|
| [vpc_config](variables.tf#L41) | Parameters to create a simple VPC on the M4CE project. | <code title="object({ ip_cidr_range = string, region = string })">object({…})</code> | | <code title="{ ip_cidr_range = "10.200.0.0/20", region = "us-west2" }">{…}</code> |
|
||||||
|
|
|
@ -18,7 +18,7 @@ variable "migration_admin_users" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "migration_viewer_users" {
|
variable "migration_viewer_users" {
|
||||||
description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format."
|
description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format."
|
||||||
type = list(string)
|
type = list(string)
|
||||||
default = []
|
default = []
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,8 +29,15 @@ This [blueprint](./composer-2/) creates a [Cloud Composer](https://cloud.google.
|
||||||
|
|
||||||
### Data Platform Foundations
|
### Data Platform Foundations
|
||||||
|
|
||||||
<a href="./data-platform-foundations/" title="Data Platform Foundations"><img src="./data-platform-foundations/images/overview_diagram.png" align="left" width="280px"></a>
|
<a href="./data-platform-foundations/" title="Data Platform"><img src="./data-platform-foundations/images/overview_diagram.png" align="left" width="280px"></a>
|
||||||
This [blueprint](./data-platform-foundations/) implements a robust and flexible Data Foundation on GCP that provides opinionated defaults, allowing customers to build and scale out additional data pipelines quickly and reliably.
|
This [blueprint](./data-platform-foundations/) implements a robust and flexible Data Platform on GCP that provides opinionated defaults, allowing customers to build and scale out additional data pipelines quickly and reliably.
|
||||||
|
|
||||||
|
<br clear="left">
|
||||||
|
|
||||||
|
### Minimal Data Platform
|
||||||
|
|
||||||
|
<a href="./data-platform-minimal/" title="Minimal Data Platform"><img src="./data-platform-minimal/images/diagram.png" align="left" width="280px"></a>
|
||||||
|
This [blueprint](./data-platform-minimal/) implements a minimal Data Platform on GCP that provides opinionated defaults, allowing customers to build and scale out additional data pipelines quickly and reliably.
|
||||||
|
|
||||||
<br clear="left">
|
<br clear="left">
|
||||||
|
|
||||||
|
|
|
@ -98,5 +98,5 @@ module "test" {
|
||||||
prefix = "prefix"
|
prefix = "prefix"
|
||||||
}
|
}
|
||||||
|
|
||||||
# tftest modules=9 resources=47
|
# tftest modules=9 resources=48
|
||||||
```
|
```
|
||||||
|
|
|
@ -12,7 +12,7 @@ In this tutorial we will also see how to make explainable predictions, in order
|
||||||
|
|
||||||
# Dataset
|
# Dataset
|
||||||
|
|
||||||
This tutorial uses a fictitious e-commerce dataset collecting programmatically generated data from the fictitious e-commerce store called The Look. The dataset is publicy available on BigQuery at this location `bigquery-public-data.thelook_ecommerce`.
|
This tutorial uses a fictitious e-commerce dataset collecting programmatically generated data from the fictitious e-commerce store called The Look. The dataset is publicly available on BigQuery at this location `bigquery-public-data.thelook_ecommerce`.
|
||||||
|
|
||||||
# Goal
|
# Goal
|
||||||
|
|
||||||
|
|
|
@ -354,7 +354,7 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# deploy the BigQuery ML model on Vertex Endpoint\n",
|
"# deploy the BigQuery ML model on Vertex Endpoint\n",
|
||||||
"# have a coffe - this step can take up 10/15 minutes to finish\n",
|
"# have a coffee - this step can take up 10/15 minutes to finish\n",
|
||||||
"model.deploy(endpoint=endpoint, deployed_model_display_name='bqml-deployed-model')"
|
"model.deploy(endpoint=endpoint, deployed_model_display_name='bqml-deployed-model')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -436,7 +436,7 @@
|
||||||
"\n",
|
"\n",
|
||||||
"Thanks to this tutorial we were able to:\n",
|
"Thanks to this tutorial we were able to:\n",
|
||||||
"- Define a re-usable Vertex AI pipeline to train and evaluate BQ ML models\n",
|
"- Define a re-usable Vertex AI pipeline to train and evaluate BQ ML models\n",
|
||||||
"- Use a Vertex AI Experiment to keep track of multiple trainings for the same model with different paramenters (in this case a different split for train/test data)\n",
|
"- Use a Vertex AI Experiment to keep track of multiple trainings for the same model with different parameters (in this case a different split for train/test data)\n",
|
||||||
"- Deploy the preferred model on a Vertex AI managed Endpoint in order to serve the model for real-time use cases via API\n",
|
"- Deploy the preferred model on a Vertex AI managed Endpoint in order to serve the model for real-time use cases via API\n",
|
||||||
"- Make batch prediction via Big Query and see what are the top 5 features which influenced the algorithm output"
|
"- Make batch prediction via Big Query and see what are the top 5 features which influenced the algorithm output"
|
||||||
]
|
]
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -60,7 +60,7 @@ Once you have the required information, head back to the cloud shell editor. Mak
|
||||||
|
|
||||||
Configure the Terraform variables in your `terraform.tfvars` file. You need to specify at least the `project_id` and `prefix` variables. See [`terraform.tfvars.sample`](terraform.tfvars.sample) as starting point.
|
Configure the Terraform variables in your `terraform.tfvars` file. You need to specify at least the `project_id` and `prefix` variables. See [`terraform.tfvars.sample`](terraform.tfvars.sample) as starting point.
|
||||||
|
|
||||||
![Deploy ressources](images/image2.png)
|
![Deploy resources](images/image2.png)
|
||||||
|
|
||||||
Run Terraform init:
|
Run Terraform init:
|
||||||
|
|
||||||
|
@ -71,7 +71,7 @@ terraform apply
|
||||||
|
|
||||||
The resource creation will take a few minutes, at the end this is the output you should expect for successful completion along with a list of the created resources:
|
The resource creation will take a few minutes, at the end this is the output you should expect for successful completion along with a list of the created resources:
|
||||||
|
|
||||||
![Ressources installed](images/image3.png)
|
![Resources installed](images/image3.png)
|
||||||
|
|
||||||
## Move to real use case consideration
|
## Move to real use case consideration
|
||||||
|
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,6 +30,7 @@ locals {
|
||||||
"roles/cloudbuild.builds.editor" = [local.groups_iam.data-engineers]
|
"roles/cloudbuild.builds.editor" = [local.groups_iam.data-engineers]
|
||||||
"roles/cloudbuild.serviceAgent" = [module.orch-sa-df-build.iam_email]
|
"roles/cloudbuild.serviceAgent" = [module.orch-sa-df-build.iam_email]
|
||||||
"roles/composer.admin" = [local.groups_iam.data-engineers]
|
"roles/composer.admin" = [local.groups_iam.data-engineers]
|
||||||
|
"roles/composer.user" = [local.groups_iam.data-engineers]
|
||||||
"roles/composer.environmentAndStorageObjectAdmin" = [local.groups_iam.data-engineers]
|
"roles/composer.environmentAndStorageObjectAdmin" = [local.groups_iam.data-engineers]
|
||||||
"roles/composer.ServiceAgentV2Ext" = [
|
"roles/composer.ServiceAgentV2Ext" = [
|
||||||
"serviceAccount:${module.orch-project.service_accounts.robots.composer}"
|
"serviceAccount:${module.orch-project.service_accounts.robots.composer}"
|
||||||
|
@ -74,7 +75,11 @@ module "orch-project" {
|
||||||
billing_account = var.project_config.billing_account_id
|
billing_account = var.project_config.billing_account_id
|
||||||
project_create = var.project_config.billing_account_id != null
|
project_create = var.project_config.billing_account_id != null
|
||||||
prefix = var.project_config.billing_account_id == null ? null : var.prefix
|
prefix = var.project_config.billing_account_id == null ? null : var.prefix
|
||||||
name = var.project_config.billing_account_id == null ? var.project_config.project_ids.orc : "${var.project_config.project_ids.orc}${local.project_suffix}"
|
name = (
|
||||||
|
var.project_config.billing_account_id == null
|
||||||
|
? var.project_config.project_ids.orc
|
||||||
|
: "${var.project_config.project_ids.orc}${local.project_suffix}"
|
||||||
|
)
|
||||||
iam = var.project_config.billing_account_id != null ? local.iam_orch : null
|
iam = var.project_config.billing_account_id != null ? local.iam_orch : null
|
||||||
iam_additive = var.project_config.billing_account_id == null ? local.iam_orch : null
|
iam_additive = var.project_config.billing_account_id == null ? local.iam_orch : null
|
||||||
oslogin = false
|
oslogin = false
|
||||||
|
|
|
@ -12,7 +12,7 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
# tfdoc:file:description Trasformation project and VPC.
|
# tfdoc:file:description Transformation project and VPC.
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
iam_trf = {
|
iam_trf = {
|
||||||
|
|
|
@ -68,7 +68,7 @@ Legend: <code>+</code> additive, <code>•</code> conditional.
|
||||||
|
|
||||||
| members | roles |
|
| members | roles |
|
||||||
|---|---|
|
|---|---|
|
||||||
|<b>gcp-data-engineers</b><br><small><i>group</i></small>|[roles/artifactregistry.admin](https://cloud.google.com/iam/docs/understanding-roles#artifactregistry.admin) <br>[roles/bigquery.dataEditor](https://cloud.google.com/iam/docs/understanding-roles#bigquery.dataEditor) <br>[roles/bigquery.jobUser](https://cloud.google.com/iam/docs/understanding-roles#bigquery.jobUser) <br>[roles/cloudbuild.builds.editor](https://cloud.google.com/iam/docs/understanding-roles#cloudbuild.builds.editor) <br>[roles/composer.admin](https://cloud.google.com/iam/docs/understanding-roles#composer.admin) <br>[roles/composer.environmentAndStorageObjectAdmin](https://cloud.google.com/iam/docs/understanding-roles#composer.environmentAndStorageObjectAdmin) <br>[roles/iam.serviceAccountUser](https://cloud.google.com/iam/docs/understanding-roles#iam.serviceAccountUser) <br>[roles/iap.httpsResourceAccessor](https://cloud.google.com/iam/docs/understanding-roles#iap.httpsResourceAccessor) <br>[roles/serviceusage.serviceUsageConsumer](https://cloud.google.com/iam/docs/understanding-roles#serviceusage.serviceUsageConsumer) <br>[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
|<b>gcp-data-engineers</b><br><small><i>group</i></small>|[roles/artifactregistry.admin](https://cloud.google.com/iam/docs/understanding-roles#artifactregistry.admin) <br>[roles/bigquery.dataEditor](https://cloud.google.com/iam/docs/understanding-roles#bigquery.dataEditor) <br>[roles/bigquery.jobUser](https://cloud.google.com/iam/docs/understanding-roles#bigquery.jobUser) <br>[roles/cloudbuild.builds.editor](https://cloud.google.com/iam/docs/understanding-roles#cloudbuild.builds.editor) <br>[roles/composer.admin](https://cloud.google.com/iam/docs/understanding-roles#composer.admin) <br>[roles/composer.environmentAndStorageObjectAdmin](https://cloud.google.com/iam/docs/understanding-roles#composer.environmentAndStorageObjectAdmin) <br>[roles/composer.user](https://cloud.google.com/iam/docs/understanding-roles#composer.user) <br>[roles/iam.serviceAccountUser](https://cloud.google.com/iam/docs/understanding-roles#iam.serviceAccountUser) <br>[roles/iap.httpsResourceAccessor](https://cloud.google.com/iam/docs/understanding-roles#iap.httpsResourceAccessor) <br>[roles/serviceusage.serviceUsageConsumer](https://cloud.google.com/iam/docs/understanding-roles#serviceusage.serviceUsageConsumer) <br>[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
||||||
|<b>SERVICE_IDENTITY_cloudcomposer-accounts</b><br><small><i>serviceAccount</i></small>|[roles/composer.ServiceAgentV2Ext](https://cloud.google.com/iam/docs/understanding-roles#composer.ServiceAgentV2Ext) <br>[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
|<b>SERVICE_IDENTITY_cloudcomposer-accounts</b><br><small><i>serviceAccount</i></small>|[roles/composer.ServiceAgentV2Ext](https://cloud.google.com/iam/docs/understanding-roles#composer.ServiceAgentV2Ext) <br>[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
||||||
|<b>SERVICE_IDENTITY_gcp-sa-cloudbuild</b><br><small><i>serviceAccount</i></small>|[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
|<b>SERVICE_IDENTITY_gcp-sa-cloudbuild</b><br><small><i>serviceAccount</i></small>|[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
||||||
|<b>SERVICE_IDENTITY_service-networking</b><br><small><i>serviceAccount</i></small>|[roles/servicenetworking.serviceAgent](https://cloud.google.com/iam/docs/understanding-roles#servicenetworking.serviceAgent) <code>+</code>|
|
|<b>SERVICE_IDENTITY_service-networking</b><br><small><i>serviceAccount</i></small>|[roles/servicenetworking.serviceAgent](https://cloud.google.com/iam/docs/understanding-roles#servicenetworking.serviceAgent) <code>+</code>|
|
||||||
|
|
|
@ -2,6 +2,8 @@
|
||||||
|
|
||||||
This module implements an opinionated Data Platform Architecture that creates and setup projects and related resources that compose an end-to-end data environment.
|
This module implements an opinionated Data Platform Architecture that creates and setup projects and related resources that compose an end-to-end data environment.
|
||||||
|
|
||||||
|
For a minimal Data Platform, please refer to the [Minimal Data Platform](../data-platform-minimal/) blueprint.
|
||||||
|
|
||||||
The code is intentionally simple, as it's intended to provide a generic initial setup and then allow easy customizations to complete the implementation of the intended design.
|
The code is intentionally simple, as it's intended to provide a generic initial setup and then allow easy customizations to complete the implementation of the intended design.
|
||||||
|
|
||||||
The following diagram is a high-level reference of the resources created and managed here:
|
The following diagram is a high-level reference of the resources created and managed here:
|
||||||
|
@ -39,13 +41,13 @@ This separation into projects allows adhering to the least-privilege principle b
|
||||||
The script will create the following projects:
|
The script will create the following projects:
|
||||||
|
|
||||||
- **Drop off** Used to store temporary data. Data is pushed to Cloud Storage, BigQuery, or Cloud PubSub. Resources are configured with a customizable lifecycle policy.
|
- **Drop off** Used to store temporary data. Data is pushed to Cloud Storage, BigQuery, or Cloud PubSub. Resources are configured with a customizable lifecycle policy.
|
||||||
- **Load** Used to load data from the drop off zone to the data warehouse. The load is made with minimal to zero transformation logic (mainly `cast`). Anonymization or tokenization of Personally Identifiable Information (PII) can be implemented here or in the transformation stage, depending on your requirements. The use of [Cloud Dataflow templates](https://cloud.google.com/dataflow/docs/concepts/dataflow-templates) is recommended. When you need to handle workloads from different teams, if strong role separation is needed between them, we suggest to customize the scirpt and have separate `Load` projects.
|
- **Load** Used to load data from the drop off zone to the data warehouse. The load is made with minimal to zero transformation logic (mainly `cast`). Anonymization or tokenization of Personally Identifiable Information (PII) can be implemented here or in the transformation stage, depending on your requirements. The use of [Cloud Dataflow templates](https://cloud.google.com/dataflow/docs/concepts/dataflow-templates) is recommended. When you need to handle workloads from different teams, if strong role separation is needed between them, we suggest to customize the script and have separate `Load` projects.
|
||||||
- **Data Warehouse** Several projects distributed across 3 separate layers, to host progressively processed and refined data:
|
- **Data Warehouse** Several projects distributed across 3 separate layers, to host progressively processed and refined data:
|
||||||
- **Landing - Raw data** Structured Data, stored in relevant formats: structured data stored in BigQuery, unstructured data stored on Cloud Storage with additional metadata stored in BigQuery (for example pictures stored in Cloud Storage and analysis of the images for Cloud Vision API stored in BigQuery).
|
- **Landing - Raw data** Structured Data, stored in relevant formats: structured data stored in BigQuery, unstructured data stored on Cloud Storage with additional metadata stored in BigQuery (for example pictures stored in Cloud Storage and analysis of the images for Cloud Vision API stored in BigQuery).
|
||||||
- **Curated - Cleansed, aggregated and curated data**
|
- **Curated - Cleansed, aggregated and curated data**
|
||||||
- **Confidential - Curated and unencrypted layer**
|
- **Confidential - Curated and unencrypted layer**
|
||||||
- **Orchestration** Used to host Cloud Composer, which orchestrates all tasks that move data across layers.
|
- **Orchestration** Used to host Cloud Composer, which orchestrates all tasks that move data across layers.
|
||||||
- **Transformation** Used to move data between Data Warehouse layers. We strongly suggest relying on BigQuery Engine to perform the transformations. If BigQuery doesn't have the features needed to perform your transformations, you can use Cloud Dataflow with [Cloud Dataflow templates](https://cloud.google.com/dataflow/docs/concepts/dataflow-templates). This stage can also optionally anonymize or tokenize PII. When you need to handle workloads from different teams, if strong role separation is needed between them, we suggest to customize the scirpt and have separate `Tranformation` projects.
|
- **Transformation** Used to move data between Data Warehouse layers. We strongly suggest relying on BigQuery Engine to perform the transformations. If BigQuery doesn't have the features needed to perform your transformations, you can use Cloud Dataflow with [Cloud Dataflow templates](https://cloud.google.com/dataflow/docs/concepts/dataflow-templates). This stage can also optionally anonymize or tokenize PII. When you need to handle workloads from different teams, if strong role separation is needed between them, we suggest to customize the script and have separate `Transformation` projects.
|
||||||
- **Exposure** Used to host resources that share processed data with external systems. Depending on the access pattern, data can be presented via Cloud SQL, BigQuery, or Bigtable. For BigQuery data, we strongly suggest relying on [Authorized views](https://cloud.google.com/bigquery/docs/authorized-views).
|
- **Exposure** Used to host resources that share processed data with external systems. Depending on the access pattern, data can be presented via Cloud SQL, BigQuery, or Bigtable. For BigQuery data, we strongly suggest relying on [Authorized views](https://cloud.google.com/bigquery/docs/authorized-views).
|
||||||
|
|
||||||
### Roles
|
### Roles
|
||||||
|
@ -224,7 +226,7 @@ module "data-platform" {
|
||||||
prefix = "myprefix"
|
prefix = "myprefix"
|
||||||
}
|
}
|
||||||
|
|
||||||
# tftest modules=43 resources=278
|
# tftest modules=43 resources=279
|
||||||
```
|
```
|
||||||
|
|
||||||
## Customizations
|
## Customizations
|
||||||
|
@ -305,5 +307,5 @@ module "test" {
|
||||||
}
|
}
|
||||||
prefix = "prefix"
|
prefix = "prefix"
|
||||||
}
|
}
|
||||||
# tftest modules=43 resources=278
|
# tftest modules=43 resources=279
|
||||||
```
|
```
|
||||||
|
|
|
@ -22,7 +22,7 @@ import argparse
|
||||||
|
|
||||||
class ParseRow(beam.DoFn):
|
class ParseRow(beam.DoFn):
|
||||||
"""
|
"""
|
||||||
Splits a given csv row by a seperator, validates fields and returns a dict
|
Splits a given csv row by a separator, validates fields and returns a dict
|
||||||
structure compatible with the BigQuery transform
|
structure compatible with the BigQuery transform
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,77 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Landing project and resources.
|
||||||
|
|
||||||
|
locals {
|
||||||
|
iam_lnd = {
|
||||||
|
"roles/storage.objectCreator" = [module.land-sa-cs-0.iam_email]
|
||||||
|
"roles/storage.objectViewer" = [module.processing-sa-cmp-0.iam_email]
|
||||||
|
"roles/storage.objectAdmin" = [module.processing-sa-0.iam_email]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module "land-project" {
|
||||||
|
source = "../../../modules/project"
|
||||||
|
parent = var.project_config.parent
|
||||||
|
billing_account = var.project_config.billing_account_id
|
||||||
|
project_create = var.project_config.billing_account_id != null
|
||||||
|
prefix = var.project_config.billing_account_id == null ? null : var.prefix
|
||||||
|
name = (
|
||||||
|
var.project_config.billing_account_id == null
|
||||||
|
? var.project_config.project_ids.landing
|
||||||
|
: "${var.project_config.project_ids.landing}${local.project_suffix}"
|
||||||
|
)
|
||||||
|
iam = var.project_config.billing_account_id != null ? local.iam_lnd : null
|
||||||
|
iam_additive = var.project_config.billing_account_id == null ? local.iam_lnd : null
|
||||||
|
services = [
|
||||||
|
"cloudkms.googleapis.com",
|
||||||
|
"cloudresourcemanager.googleapis.com",
|
||||||
|
"iam.googleapis.com",
|
||||||
|
"serviceusage.googleapis.com",
|
||||||
|
"stackdriver.googleapis.com",
|
||||||
|
"storage.googleapis.com",
|
||||||
|
"storage-component.googleapis.com",
|
||||||
|
]
|
||||||
|
service_encryption_key_ids = {
|
||||||
|
bq = [var.service_encryption_keys.bq]
|
||||||
|
storage = [var.service_encryption_keys.storage]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Cloud Storage
|
||||||
|
|
||||||
|
module "land-sa-cs-0" {
|
||||||
|
source = "../../../modules/iam-service-account"
|
||||||
|
project_id = module.land-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "lnd-cs-0"
|
||||||
|
display_name = "Data platform GCS landing service account."
|
||||||
|
iam = {
|
||||||
|
"roles/iam.serviceAccountTokenCreator" = [
|
||||||
|
local.groups_iam.data-engineers
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module "land-cs-0" {
|
||||||
|
source = "../../../modules/gcs"
|
||||||
|
project_id = module.land-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "lnd-cs-0"
|
||||||
|
location = var.location
|
||||||
|
storage_class = "MULTI_REGIONAL"
|
||||||
|
encryption_key = var.service_encryption_keys.storage
|
||||||
|
force_destroy = var.data_force_destroy
|
||||||
|
}
|
|
@ -0,0 +1,117 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Cloud Composer resources.
|
||||||
|
|
||||||
|
locals {
|
||||||
|
env_variables = {
|
||||||
|
BQ_LOCATION = var.location
|
||||||
|
CURATED_BQ_DATASET = module.cur-bq-0.dataset_id
|
||||||
|
CURATED_GCS = module.cur-cs-0.url
|
||||||
|
CURATED_PRJ = module.cur-project.project_id
|
||||||
|
DP_KMS_KEY = var.service_encryption_keys.compute
|
||||||
|
DP_REGION = var.region
|
||||||
|
GCP_REGION = var.region
|
||||||
|
LAND_PRJ = module.land-project.project_id
|
||||||
|
LAND_GCS = module.land-cs-0.name
|
||||||
|
PHS_CLUSTER_NAME = try(module.processing-dp-historyserver[0].name, null)
|
||||||
|
PROCESSING_GCS = module.processing-cs-0.name
|
||||||
|
PROCESSING_PRJ = module.processing-project.project_id
|
||||||
|
PROCESSING_SA = module.processing-sa-0.email
|
||||||
|
PROCESSING_SUBNET = local.processing_subnet
|
||||||
|
PROCESSING_VPC = local.processing_vpc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module "processing-sa-cmp-0" {
|
||||||
|
source = "../../../modules/iam-service-account"
|
||||||
|
project_id = module.processing-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "prc-cmp-0"
|
||||||
|
display_name = "Data platform Composer service account"
|
||||||
|
iam = {
|
||||||
|
"roles/iam.serviceAccountTokenCreator" = [local.groups_iam.data-engineers]
|
||||||
|
"roles/iam.serviceAccountUser" = [module.processing-sa-cmp-0.iam_email]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_composer_environment" "processing-cmp-0" {
|
||||||
|
count = var.enable_services.composer == true ? 1 : 0
|
||||||
|
project = module.processing-project.project_id
|
||||||
|
name = "${var.prefix}-prc-cmp-0"
|
||||||
|
region = var.region
|
||||||
|
config {
|
||||||
|
software_config {
|
||||||
|
airflow_config_overrides = var.composer_config.software_config.airflow_config_overrides
|
||||||
|
pypi_packages = var.composer_config.software_config.pypi_packages
|
||||||
|
env_variables = merge(
|
||||||
|
var.composer_config.software_config.env_variables, local.env_variables
|
||||||
|
)
|
||||||
|
image_version = var.composer_config.software_config.image_version
|
||||||
|
}
|
||||||
|
workloads_config {
|
||||||
|
scheduler {
|
||||||
|
cpu = var.composer_config.workloads_config.scheduler.cpu
|
||||||
|
memory_gb = var.composer_config.workloads_config.scheduler.memory_gb
|
||||||
|
storage_gb = var.composer_config.workloads_config.scheduler.storage_gb
|
||||||
|
count = var.composer_config.workloads_config.scheduler.count
|
||||||
|
}
|
||||||
|
web_server {
|
||||||
|
cpu = var.composer_config.workloads_config.web_server.cpu
|
||||||
|
memory_gb = var.composer_config.workloads_config.web_server.memory_gb
|
||||||
|
storage_gb = var.composer_config.workloads_config.web_server.storage_gb
|
||||||
|
}
|
||||||
|
worker {
|
||||||
|
cpu = var.composer_config.workloads_config.worker.cpu
|
||||||
|
memory_gb = var.composer_config.workloads_config.worker.memory_gb
|
||||||
|
storage_gb = var.composer_config.workloads_config.worker.storage_gb
|
||||||
|
min_count = var.composer_config.workloads_config.worker.min_count
|
||||||
|
max_count = var.composer_config.workloads_config.worker.max_count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
environment_size = var.composer_config.environment_size
|
||||||
|
|
||||||
|
node_config {
|
||||||
|
network = local.processing_vpc
|
||||||
|
subnetwork = local.processing_subnet
|
||||||
|
service_account = module.processing-sa-cmp-0.email
|
||||||
|
enable_ip_masq_agent = true
|
||||||
|
tags = ["composer-worker"]
|
||||||
|
ip_allocation_policy {
|
||||||
|
cluster_secondary_range_name = var.network_config.composer_ip_ranges.pods_range_name
|
||||||
|
services_secondary_range_name = var.network_config.composer_ip_ranges.services_range_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
private_environment_config {
|
||||||
|
enable_private_endpoint = "true"
|
||||||
|
cloud_sql_ipv4_cidr_block = var.network_config.composer_ip_ranges.cloud_sql
|
||||||
|
master_ipv4_cidr_block = var.network_config.composer_ip_ranges.gke_master
|
||||||
|
cloud_composer_connection_subnetwork = var.network_config.composer_ip_ranges.connection_subnetwork
|
||||||
|
}
|
||||||
|
dynamic "encryption_config" {
|
||||||
|
for_each = (
|
||||||
|
var.service_encryption_keys.composer != null
|
||||||
|
? { 1 = 1 }
|
||||||
|
: {}
|
||||||
|
)
|
||||||
|
content {
|
||||||
|
kms_key_name = var.service_encryption_keys.composer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
depends_on = [
|
||||||
|
module.processing-project
|
||||||
|
]
|
||||||
|
}
|
|
@ -0,0 +1,123 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Cloud Dataproc resources.
|
||||||
|
|
||||||
|
module "processing-dp-history" {
|
||||||
|
count = var.enable_services.dataproc_history_server == true ? 1 : 0
|
||||||
|
source = "../../../modules/gcs"
|
||||||
|
project_id = module.processing-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "prc-cs-dp-history"
|
||||||
|
location = var.region
|
||||||
|
storage_class = "REGIONAL"
|
||||||
|
encryption_key = var.service_encryption_keys.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
module "processing-sa-0" {
|
||||||
|
source = "../../../modules/iam-service-account"
|
||||||
|
project_id = module.processing-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "prc-0"
|
||||||
|
display_name = "Processing service account"
|
||||||
|
iam = {
|
||||||
|
"roles/iam.serviceAccountTokenCreator" = [
|
||||||
|
local.groups_iam.data-engineers,
|
||||||
|
module.processing-sa-cmp-0.iam_email
|
||||||
|
],
|
||||||
|
"roles/iam.serviceAccountUser" = [
|
||||||
|
module.processing-sa-cmp-0.iam_email
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module "processing-staging-0" {
|
||||||
|
source = "../../../modules/gcs"
|
||||||
|
project_id = module.processing-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "prc-stg-0"
|
||||||
|
location = var.location
|
||||||
|
storage_class = "MULTI_REGIONAL"
|
||||||
|
encryption_key = var.service_encryption_keys.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
module "processing-temp-0" {
|
||||||
|
source = "../../../modules/gcs"
|
||||||
|
project_id = module.processing-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "prc-tmp-0"
|
||||||
|
location = var.location
|
||||||
|
storage_class = "MULTI_REGIONAL"
|
||||||
|
encryption_key = var.service_encryption_keys.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
module "processing-log-0" {
|
||||||
|
source = "../../../modules/gcs"
|
||||||
|
project_id = module.processing-project.project_id
|
||||||
|
prefix = var.prefix
|
||||||
|
name = "prc-log-0"
|
||||||
|
location = var.location
|
||||||
|
storage_class = "MULTI_REGIONAL"
|
||||||
|
encryption_key = var.service_encryption_keys.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
# Optional persistent Dataproc history server (zero workers) exposing Spark
# and YARN job history stored in the staging bucket.
module "processing-dp-historyserver" {
  # `== true` deliberately maps a null flag to "disabled".
  count      = var.enable_services.dataproc_history_server == true ? 1 : 0
  source     = "../../../modules/dataproc"
  project_id = module.processing-project.project_id
  name       = "history-server"
  prefix     = var.prefix
  region     = var.region
  dataproc_config = {
    cluster_config = {
      staging_bucket = module.processing-staging-0.name
      temp_bucket    = module.processing-temp-0.name
      gce_cluster_config = {
        # Fixed: previously indexed module.processing-vpc[0] directly, which
        # fails when a shared VPC is configured (the local VPC module then has
        # count = 0). local.processing_subnet resolves to the right subnet in
        # both scenarios.
        subnetwork             = local.processing_subnet
        zone                   = "${var.region}-b"
        service_account        = module.processing-sa-0.email
        service_account_scopes = ["cloud-platform"]
        internal_ip_only       = true
      }
      # Single-node cluster: zero workers, allowed via the override below.
      worker_config = {
        num_instances    = 0
        machine_type     = null
        min_cpu_platform = null
        image_uri        = null
      }
      software_config = {
        override_properties = {
          "dataproc:dataproc.allow.zero.workers" = "true"
          "dataproc:job.history.to-gcs.enabled"  = "true"
          "spark:spark.history.fs.logDirectory" = (
            "gs://${module.processing-staging-0.name}/*/spark-job-history"
          )
          "spark:spark.eventLog.dir" = (
            "gs://${module.processing-staging-0.name}/*/spark-job-history"
          )
          "spark:spark.history.custom.executor.log.url.applyIncompleteApplication" = "false"
          "spark:spark.history.custom.executor.log.url" = (
            "{{YARN_LOG_SERVER_URL}}/{{NM_HOST}}:{{NM_PORT}}/{{CONTAINER_ID}}/{{CONTAINER_ID}}/{{USER}}/{{FILE_NAME}}"
          )
        }
      }
      # Component Gateway, so the history UI is reachable without SSH tunnels.
      endpoint_config = {
        enable_http_port_access = "true"
      }
      encryption_config = {
        kms_key_name = var.service_encryption_keys.compute
      }
    }
  }
}
|
|
@ -0,0 +1,160 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Processing project and VPC.
|
||||||
|
|
||||||
|
locals {
  # Project-level IAM for the processing project, keyed by role.
  iam_processing = {
    "roles/composer.admin"                            = [local.groups_iam.data-engineers]
    "roles/composer.environmentAndStorageObjectAdmin" = [local.groups_iam.data-engineers]
    "roles/composer.ServiceAgentV2Ext" = [
      "serviceAccount:${module.processing-project.service_accounts.robots.composer}"
    ]
    "roles/composer.worker" = [
      module.processing-sa-cmp-0.iam_email
    ]
    "roles/dataproc.editor" = [
      module.processing-sa-cmp-0.iam_email
    ]
    "roles/dataproc.worker" = [
      module.processing-sa-0.iam_email
    ]
    "roles/iam.serviceAccountUser" = [
      module.processing-sa-cmp-0.iam_email, local.groups_iam.data-engineers
    ]
    "roles/iap.httpsResourceAccessor"           = [local.groups_iam.data-engineers]
    "roles/serviceusage.serviceUsageConsumer"   = [local.groups_iam.data-engineers]
    "roles/storage.admin" = [
      module.processing-sa-cmp-0.iam_email,
      "serviceAccount:${module.processing-project.service_accounts.robots.composer}",
      local.groups_iam.data-engineers
    ]
  }
  # Subnet and VPC used by processing workloads: either the configured shared
  # VPC resources or the network created locally in this stage.
  processing_subnet = (
    local.use_shared_vpc
    # NOTE(review): "processingestration" looks like a botched rename of
    # "orchestration" — confirm the attribute name against variables.tf.
    ? var.network_config.subnet_self_links.processingestration
    : module.processing-vpc[0].subnet_self_links["${var.region}/${var.prefix}-processing"]
  )
  processing_vpc = (
    local.use_shared_vpc
    ? var.network_config.network_self_link
    : module.processing-vpc[0].self_link
  )
}
|
||||||
|
|
||||||
|
# Processing project: hosts Composer, Dataproc and their support resources.
# When a billing account is provided the project is created and IAM is
# authoritative; otherwise an existing project is used with additive IAM.
module "processing-project" {
  source          = "../../../modules/project"
  parent          = var.project_config.parent
  billing_account = var.project_config.billing_account_id
  project_create  = var.project_config.billing_account_id != null
  prefix          = var.project_config.billing_account_id == null ? null : var.prefix
  name = (
    var.project_config.billing_account_id == null
    ? var.project_config.project_ids.processing
    : "${var.project_config.project_ids.processing}${local.project_suffix}"
  )
  iam          = var.project_config.billing_account_id != null ? local.iam_processing : null
  iam_additive = var.project_config.billing_account_id == null ? local.iam_processing : null
  oslogin      = false
  services = [
    "bigquery.googleapis.com",
    "bigqueryreservation.googleapis.com",
    "bigquerystorage.googleapis.com",
    "cloudkms.googleapis.com",
    "cloudresourcemanager.googleapis.com",
    "composer.googleapis.com",
    "compute.googleapis.com",
    "container.googleapis.com",
    "dataproc.googleapis.com",
    "iam.googleapis.com",
    "servicenetworking.googleapis.com",
    "serviceusage.googleapis.com",
    "stackdriver.googleapis.com",
    "storage.googleapis.com",
    "storage-component.googleapis.com"
  ]
  service_encryption_key_ids = {
    composer = [var.service_encryption_keys.composer]
    compute  = [var.service_encryption_keys.compute]
    storage  = [var.service_encryption_keys.storage]
  }
  # Shared VPC attachment and service-identity grants on the host project.
  shared_vpc_service_config = var.network_config.host_project == null ? null : {
    attach       = true
    host_project = var.network_config.host_project
    service_identity_iam = {
      "roles/compute.networkUser" = [
        "cloudservices", "compute", "container-engine"
      ]
      "roles/composer.sharedVpcAgent" = [
        "composer"
      ]
      "roles/container.hostServiceAgentUser" = [
        # Fixed typo: was "container-egine", which would grant the role to a
        # non-existent service identity and break GKE-on-shared-VPC (Composer).
        "container-engine"
      ]
    }
  }
}
|
||||||
|
|
||||||
|
# Cloud Storage
|
||||||
|
|
||||||
|
# General-purpose Cloud Storage bucket for the processing project.
module "processing-cs-0" {
  source         = "../../../modules/gcs"
  project_id     = module.processing-project.project_id
  prefix         = var.prefix
  name           = "prc-cs-0"
  location       = var.location
  storage_class  = "MULTI_REGIONAL"
  encryption_key = var.service_encryption_keys.storage
}
|
||||||
|
|
||||||
|
# internal VPC resources
|
||||||
|
|
||||||
|
# Local VPC, created only when no shared VPC is configured. The secondary
# ranges back the Composer GKE pods and services.
module "processing-vpc" {
  source     = "../../../modules/net-vpc"
  count      = local.use_shared_vpc ? 0 : 1
  project_id = module.processing-project.project_id
  name       = "${var.prefix}-processing"
  subnets = [
    {
      ip_cidr_range = "10.10.0.0/24"
      name          = "${var.prefix}-processing"
      region        = var.region
      secondary_ip_ranges = {
        pods     = "10.10.8.0/22"
        services = "10.10.12.0/24"
      }
    }
  ]
}
|
||||||
|
|
||||||
|
# Default firewall rules for the local VPC (skipped on shared VPC).
module "processing-vpc-firewall" {
  source     = "../../../modules/net-vpc-firewall"
  count      = local.use_shared_vpc ? 0 : 1
  project_id = module.processing-project.project_id
  network    = module.processing-vpc[0].name
  default_rules_config = {
    admin_ranges = ["10.10.0.0/24"]
  }
}
|
||||||
|
|
||||||
|
# Cloud NAT so internal-only instances can reach the internet
# (skipped on shared VPC).
module "processing-nat" {
  source         = "../../../modules/net-cloudnat"
  count          = local.use_shared_vpc ? 0 : 1
  project_id     = module.processing-project.project_id
  name           = "${var.prefix}-processing"
  region         = var.region
  router_network = module.processing-vpc[0].name
}
|
|
@ -0,0 +1,99 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Data curated project and resources.
|
||||||
|
|
||||||
|
locals {
  # Project-level IAM for the curated project, keyed by role.
  cur_iam = {
    "roles/bigquery.dataOwner" = [module.processing-sa-0.iam_email]
    "roles/bigquery.dataViewer" = [
      local.groups_iam.data-analysts,
      local.groups_iam.data-engineers
    ]
    "roles/bigquery.jobUser" = [
      module.processing-sa-0.iam_email,
      local.groups_iam.data-analysts,
      local.groups_iam.data-engineers
    ]
    "roles/datacatalog.tagTemplateViewer" = [
      local.groups_iam.data-analysts, local.groups_iam.data-engineers
    ]
    "roles/datacatalog.viewer" = [
      local.groups_iam.data-analysts, local.groups_iam.data-engineers
    ]
    "roles/storage.objectViewer" = [
      local.groups_iam.data-analysts, local.groups_iam.data-engineers
    ]
    "roles/storage.objectAdmin" = [module.processing-sa-0.iam_email]
  }
  # Services enabled on the curated project.
  cur_services = [
    "iam.googleapis.com",
    "bigquery.googleapis.com",
    "bigqueryreservation.googleapis.com",
    "bigquerystorage.googleapis.com",
    "cloudkms.googleapis.com",
    "cloudresourcemanager.googleapis.com",
    "compute.googleapis.com",
    "servicenetworking.googleapis.com",
    "serviceusage.googleapis.com",
    "stackdriver.googleapis.com",
    "storage.googleapis.com",
    "storage-component.googleapis.com"
  ]
}
|
||||||
|
|
||||||
|
# Project
|
||||||
|
|
||||||
|
# Curated project: hosts cleansed / aggregated data. Created with
# authoritative IAM when a billing account is given, otherwise attached to an
# existing project with additive IAM.
module "cur-project" {
  source          = "../../../modules/project"
  parent          = var.project_config.parent
  billing_account = var.project_config.billing_account_id
  project_create  = var.project_config.billing_account_id != null
  prefix          = var.project_config.billing_account_id == null ? null : var.prefix
  name = (
    var.project_config.billing_account_id == null
    ? var.project_config.project_ids.curated
    : "${var.project_config.project_ids.curated}${local.project_suffix}"
  )
  iam          = var.project_config.billing_account_id != null ? local.cur_iam : {}
  iam_additive = var.project_config.billing_account_id == null ? local.cur_iam : {}
  services     = local.cur_services
  service_encryption_key_ids = {
    bq      = [var.service_encryption_keys.bq]
    storage = [var.service_encryption_keys.storage]
  }
}
|
||||||
|
|
||||||
|
# Bigquery
|
||||||
|
|
||||||
|
# Curated BigQuery dataset; dataset ids cannot contain dashes, so the prefix
# is normalized to underscores.
module "cur-bq-0" {
  source         = "../../../modules/bigquery-dataset"
  project_id     = module.cur-project.project_id
  id             = "${replace(var.prefix, "-", "_")}_cur_bq_0"
  location       = var.location
  encryption_key = var.service_encryption_keys.bq
}
|
||||||
|
|
||||||
|
# Cloud storage
|
||||||
|
|
||||||
|
# Curated Cloud Storage bucket; destroy behavior follows the stage-wide
# data_force_destroy flag.
module "cur-cs-0" {
  source         = "../../../modules/gcs"
  project_id     = module.cur-project.project_id
  prefix         = var.prefix
  name           = "cur-cs-0"
  location       = var.location
  storage_class  = "MULTI_REGIONAL"
  encryption_key = var.service_encryption_keys.storage
  force_destroy  = var.data_force_destroy
}
|
|
@ -0,0 +1,67 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Common project and resources.
|
||||||
|
|
||||||
|
locals {
  # Project-level IAM for the common project (DLP and Data Catalog),
  # keyed by role.
  iam_common = {
    "roles/dlp.admin"          = [local.groups_iam.data-security]
    "roles/dlp.estimatesAdmin" = [local.groups_iam.data-engineers]
    "roles/dlp.reader"         = [local.groups_iam.data-engineers]
    "roles/dlp.user" = [
      module.processing-sa-0.iam_email,
      local.groups_iam.data-engineers
    ]
    "roles/datacatalog.admin" = [local.groups_iam.data-security]
    "roles/datacatalog.viewer" = [
      module.processing-sa-0.iam_email,
      local.groups_iam.data-analysts
    ]
    "roles/datacatalog.categoryFineGrainedReader" = [
      module.processing-sa-0.iam_email
    ]
  }
}
|
||||||
|
# Common project: hosts shared services (Cloud DLP, Data Catalog). Created
# with authoritative IAM when a billing account is given, otherwise attached
# to an existing project with additive IAM.
module "common-project" {
  source          = "../../../modules/project"
  parent          = var.project_config.parent
  billing_account = var.project_config.billing_account_id
  project_create  = var.project_config.billing_account_id != null
  prefix          = var.project_config.billing_account_id == null ? null : var.prefix
  name = (
    var.project_config.billing_account_id == null
    ? var.project_config.project_ids.common
    : "${var.project_config.project_ids.common}${local.project_suffix}"
  )
  iam          = var.project_config.billing_account_id != null ? local.iam_common : null
  iam_additive = var.project_config.billing_account_id == null ? local.iam_common : null
  services = [
    "cloudresourcemanager.googleapis.com",
    "datacatalog.googleapis.com",
    "dlp.googleapis.com",
    "iam.googleapis.com",
    "serviceusage.googleapis.com",
    "stackdriver.googleapis.com"
  ]
}
|
||||||
|
|
||||||
|
# Data Catalog Policy tag
|
||||||
|
|
||||||
|
# Data Catalog policy tags used for BigQuery column-level security.
module "common-datacatalog" {
  source     = "../../../modules/data-catalog-policy-tag"
  project_id = module.common-project.project_id
  name       = "${var.prefix}-datacatalog-policy-tags"
  location   = var.location
  tags       = var.data_catalog_tags
}
|
|
@ -0,0 +1,39 @@
|
||||||
|
# IAM bindings reference
|
||||||
|
|
||||||
|
Legend: <code>+</code> additive, <code>•</code> conditional.
|
||||||
|
|
||||||
|
## Project <i>cmn</i>
|
||||||
|
|
||||||
|
| members | roles |
|
||||||
|
|---|---|
|
||||||
|
|<b>gcp-data-analysts</b><br><small><i>group</i></small>|[roles/datacatalog.viewer](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.viewer) |
|
||||||
|
|<b>gcp-data-engineers</b><br><small><i>group</i></small>|[roles/dlp.estimatesAdmin](https://cloud.google.com/iam/docs/understanding-roles#dlp.estimatesAdmin) <br>[roles/dlp.reader](https://cloud.google.com/iam/docs/understanding-roles#dlp.reader) <br>[roles/dlp.user](https://cloud.google.com/iam/docs/understanding-roles#dlp.user) |
|
||||||
|
|<b>gcp-data-security</b><br><small><i>group</i></small>|[roles/datacatalog.admin](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.admin) <br>[roles/dlp.admin](https://cloud.google.com/iam/docs/understanding-roles#dlp.admin) |
|
||||||
|
|<b>prc-dp-0</b><br><small><i>serviceAccount</i></small>|[roles/datacatalog.categoryFineGrainedReader](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.categoryFineGrainedReader) <br>[roles/datacatalog.viewer](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.viewer) <br>[roles/dlp.user](https://cloud.google.com/iam/docs/understanding-roles#dlp.user) |
|
||||||
|
|
||||||
|
## Project <i>cur</i>
|
||||||
|
|
||||||
|
| members | roles |
|
||||||
|
|---|---|
|
||||||
|
|<b>gcp-data-analysts</b><br><small><i>group</i></small>|[roles/bigquery.dataViewer](https://cloud.google.com/iam/docs/understanding-roles#bigquery.dataViewer) <br>[roles/bigquery.jobUser](https://cloud.google.com/iam/docs/understanding-roles#bigquery.jobUser) <br>[roles/datacatalog.tagTemplateViewer](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.tagTemplateViewer) <br>[roles/datacatalog.viewer](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.viewer) <br>[roles/storage.objectViewer](https://cloud.google.com/iam/docs/understanding-roles#storage.objectViewer) |
|
||||||
|
|<b>gcp-data-engineers</b><br><small><i>group</i></small>|[roles/bigquery.dataViewer](https://cloud.google.com/iam/docs/understanding-roles#bigquery.dataViewer) <br>[roles/bigquery.jobUser](https://cloud.google.com/iam/docs/understanding-roles#bigquery.jobUser) <br>[roles/datacatalog.tagTemplateViewer](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.tagTemplateViewer) <br>[roles/datacatalog.viewer](https://cloud.google.com/iam/docs/understanding-roles#datacatalog.viewer) <br>[roles/storage.objectViewer](https://cloud.google.com/iam/docs/understanding-roles#storage.objectViewer) |
|
||||||
|
|<b>SERVICE_IDENTITY_service-networking</b><br><small><i>serviceAccount</i></small>|[roles/servicenetworking.serviceAgent](https://cloud.google.com/iam/docs/understanding-roles#servicenetworking.serviceAgent) <code>+</code>|
|
||||||
|
|<b>prc-dp-0</b><br><small><i>serviceAccount</i></small>|[roles/bigquery.dataOwner](https://cloud.google.com/iam/docs/understanding-roles#bigquery.dataOwner) <br>[roles/bigquery.jobUser](https://cloud.google.com/iam/docs/understanding-roles#bigquery.jobUser) <br>[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
||||||
|
|
||||||
|
## Project <i>lnd</i>
|
||||||
|
|
||||||
|
| members | roles |
|
||||||
|
|---|---|
|
||||||
|
|<b>lnd-cs-0</b><br><small><i>serviceAccount</i></small>|[roles/storage.objectCreator](https://cloud.google.com/iam/docs/understanding-roles#storage.objectCreator) |
|
||||||
|
|<b>prc-cmp-0</b><br><small><i>serviceAccount</i></small>|[roles/storage.objectViewer](https://cloud.google.com/iam/docs/understanding-roles#storage.objectViewer) |
|
||||||
|
|<b>prc-dp-0</b><br><small><i>serviceAccount</i></small>|[roles/storage.objectAdmin](https://cloud.google.com/iam/docs/understanding-roles#storage.objectAdmin) |
|
||||||
|
|
||||||
|
## Project <i>prc</i>
|
||||||
|
|
||||||
|
| members | roles |
|
||||||
|
|---|---|
|
||||||
|
|<b>gcp-data-engineers</b><br><small><i>group</i></small>|[roles/composer.admin](https://cloud.google.com/iam/docs/understanding-roles#composer.admin) <br>[roles/composer.environmentAndStorageObjectAdmin](https://cloud.google.com/iam/docs/understanding-roles#composer.environmentAndStorageObjectAdmin) <br>[roles/iam.serviceAccountUser](https://cloud.google.com/iam/docs/understanding-roles#iam.serviceAccountUser) <br>[roles/iap.httpsResourceAccessor](https://cloud.google.com/iam/docs/understanding-roles#iap.httpsResourceAccessor) <br>[roles/serviceusage.serviceUsageConsumer](https://cloud.google.com/iam/docs/understanding-roles#serviceusage.serviceUsageConsumer) <br>[roles/storage.admin](https://cloud.google.com/iam/docs/understanding-roles#storage.admin) |
|
||||||
|
|<b>SERVICE_IDENTITY_cloudcomposer-accounts</b><br><small><i>serviceAccount</i></small>|[roles/composer.ServiceAgentV2Ext](https://cloud.google.com/iam/docs/understanding-roles#composer.ServiceAgentV2Ext) <br>[roles/storage.admin](https://cloud.google.com/iam/docs/understanding-roles#storage.admin) |
|
||||||
|
|<b>SERVICE_IDENTITY_service-networking</b><br><small><i>serviceAccount</i></small>|[roles/servicenetworking.serviceAgent](https://cloud.google.com/iam/docs/understanding-roles#servicenetworking.serviceAgent) <code>+</code>|
|
||||||
|
|<b>prc-cmp-0</b><br><small><i>serviceAccount</i></small>|[roles/composer.worker](https://cloud.google.com/iam/docs/understanding-roles#composer.worker) <br>[roles/dataproc.editor](https://cloud.google.com/iam/docs/understanding-roles#dataproc.editor) <br>[roles/iam.serviceAccountUser](https://cloud.google.com/iam/docs/understanding-roles#iam.serviceAccountUser) <br>[roles/storage.admin](https://cloud.google.com/iam/docs/understanding-roles#storage.admin) |
|
||||||
|
|<b>prc-dp-0</b><br><small><i>serviceAccount</i></small>|[roles/dataproc.worker](https://cloud.google.com/iam/docs/understanding-roles#dataproc.worker) |
|
|
@ -0,0 +1,309 @@
|
||||||
|
# Minimal Data Platform
|
||||||
|
|
||||||
|
This module implements a minimal opinionated Data Platform Architecture based on Dataproc Serverless resources. It creates and sets up projects and related resources that compose an end-to-end data environment.
|
||||||
|
|
||||||
|
This minimal Data Platform Architecture keeps the solution to a minimal set of projects. This approach makes the architecture easy to read and operate, but limits its ability to scale to multiple workloads. For more complex use cases — where workloads need processing role segmentation between transformations, or deeper cost attribution — refer to the [Data Platform](../data-platform-foundations/) blueprint.
|
||||||
|
|
||||||
|
The code is intentionally simple, as it's intended to provide a generic initial setup and then allow easy customizations to complete the implementation of the intended design.
|
||||||
|
|
||||||
|
The following diagram is a high-level reference of the resources created and managed here:
|
||||||
|
|
||||||
|
![Data Platform architecture overview](./images/diagram.png "Data Platform architecture overview")
|
||||||
|
|
||||||
|
A demo [Airflow pipeline](demo/orchestrate_pyspark.py) is also part of this blueprint: it can be built and run on top of the foundational infrastructure to verify or test the setup quickly.
|
||||||
|
|
||||||
|
## Design overview and choices
|
||||||
|
|
||||||
|
Despite its simplicity, this stage implements the basics of a design that we've seen working well for various customers.
|
||||||
|
|
||||||
|
The approach adapts to different high-level requirements:
|
||||||
|
|
||||||
|
- boundaries for each step
|
||||||
|
- clearly defined actors
|
||||||
|
- least privilege principle
|
||||||
|
- rely on service account impersonation
|
||||||
|
|
||||||
|
The code in this blueprint doesn't address Organization-level configurations (Organization policy, VPC-SC, centralized logs). We expect those elements to be managed by automation stages external to this script like those in [FAST](../../../fast) and this blueprint deployed on top of them as one of the [stages](../../../fast/stages/3-data-platform/dev/README.md).
|
||||||
|
|
||||||
|
## Project structure
|
||||||
|
|
||||||
|
The Data Platform is designed to rely on several projects, one project per data stage. The stages identified are:
|
||||||
|
|
||||||
|
- landing
|
||||||
|
- processing
|
||||||
|
- curated
|
||||||
|
- common
|
||||||
|
|
||||||
|
This separation into projects allows adhering to the least-privilege principle by using project-level roles.
|
||||||
|
|
||||||
|
The script will create the following projects:
|
||||||
|
|
||||||
|
- **Landing** Data, stored in relevant formats. Structured data can be stored in BigQuery or in GCS using an appropriate file format such as AVRO or Parquet. Unstructured data stored on Cloud Storage.
|
||||||
|
- **Processing** Used to host all resources needed to process and orchestrate data movement. Cloud Composer orchestrates all tasks that move data across layers. Cloud Dataproc Serverless processes and moves data between layers. Anonymization or tokenization of Personally Identifiable Information (PII) can be implemented here using Cloud DLP or a custom solution, depending on your requirements.
|
||||||
|
- **Curated** Cleansed, aggregated and curated data.
|
||||||
|
- **Common** Common services such as [Cloud DLP](https://cloud.google.com/dlp) or [Data Catalog](https://cloud.google.com/data-catalog/docs/concepts/overview).
|
||||||
|
|
||||||
|
## Roles
|
||||||
|
|
||||||
|
We assign roles on resources at the project level, granting the appropriate roles via groups (humans) and service accounts (services and applications) according to best practices.
|
||||||
|
|
||||||
|
## Service accounts
|
||||||
|
|
||||||
|
Service account creation follows the least privilege principle, performing a single task which requires access to a defined set of resources. The table below shows a high level overview of roles for each service account on each data layer, using READ or WRITE access patterns for simplicity.
|
||||||
|
|
||||||
|
A full reference of IAM roles managed by the Data Platform is [available here](IAM.md).
|
||||||
|
|
||||||
|
For detailed roles please refer to the code.
|
||||||
|
|
||||||
|
Using service account keys within a data pipeline exposes you to several security risks deriving from a credentials leak. This blueprint shows how to leverage impersonation to avoid the need to create keys.
|
||||||
|
|
||||||
|
## User groups
|
||||||
|
|
||||||
|
User groups provide a stable frame of reference that allows decoupling the final set of permissions from the stage where entities and resources are created, and their IAM bindings defined.
|
||||||
|
|
||||||
|
We use three groups to control access to resources:
|
||||||
|
|
||||||
|
- *Data Engineers* They handle and run the Data Hub, with read access to all resources in order to troubleshoot possible issues with pipelines. This team can also impersonate any service account.
|
||||||
|
- *Data Analysts*. They perform analysis on datasets, with read access to the Data Warehouse Confidential project, and BigQuery READ/WRITE access to the playground project.
|
||||||
|
- *Data Security*. They handle security configurations related to the Data Hub. This team has admin access to the common project to configure Cloud DLP templates or Data Catalog policy tags.
|
||||||
|
|
||||||
|
### Virtual Private Cloud (VPC) design
|
||||||
|
|
||||||
|
As is often the case in real-world configurations, this blueprint accepts as input an existing [Shared-VPC](https://cloud.google.com/vpc/docs/shared-vpc) via the `network_config` variable. Make sure that the GKE API (`container.googleapis.com`) is enabled in the VPC host project.
|
||||||
|
|
||||||
|
If the `network_config` variable is not provided, a VPC will be created in the processing project, together with default firewall rules and a Cloud NAT instance.
|
||||||
|
|
||||||
|
### IP ranges and subnetting
|
||||||
|
|
||||||
|
To deploy this blueprint with self-managed VPCs you need the following ranges:
|
||||||
|
|
||||||
|
- one /24 for the processing project VPC subnet used for Cloud Dataproc workers
|
||||||
|
- one /24 range for the orchestration VPC subnet used for Composer workers
|
||||||
|
- one /22 and one /24 ranges for the secondary ranges associated with the orchestration VPC subnet
|
||||||
|
|
||||||
|
If you are using Shared VPC, you need one subnet with one /22 and one /24 secondary range defined for Composer pods and services.
|
||||||
|
|
||||||
|
In both VPC scenarios, you also need these ranges for Composer:
|
||||||
|
|
||||||
|
- one /24 for Cloud SQL
|
||||||
|
- one /28 for the GKE control plane
|
||||||
|
|
||||||
|
### Resource naming conventions
|
||||||
|
|
||||||
|
Resources follow the naming convention described below.
|
||||||
|
|
||||||
|
- `prefix-layer` for projects
|
||||||
|
- `prefix-layer-product` for resources
|
||||||
|
- `prefix-layer[2]-gcp-product[2]-counter` for services and service accounts
|
||||||
|
|
||||||
|
### Encryption
|
||||||
|
|
||||||
|
We suggest a centralized approach to key management, where Organization Security is the only team that can access encryption material, and keyrings and keys are managed in a project external to the Data Platform.
|
||||||
|
|
||||||
|
![Centralized Cloud Key Management high-level diagram](./images/kms_diagram.png "Centralized Cloud Key Management high-level diagram")
|
||||||
|
|
||||||
|
To configure the use of Cloud KMS on resources, you have to specify the key id on the `service_encryption_keys` variable. Key locations should match resource locations. Example:
|
||||||
|
|
||||||
|
```tfvars
|
||||||
|
service_encryption_keys = {
|
||||||
|
bq = "KEY_URL"
|
||||||
|
composer = "KEY_URL"
|
||||||
|
compute = "KEY_URL"
|
||||||
|
storage = "KEY_URL"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This step is optional and depends on customer policies and security best practices.
|
||||||
|
|
||||||
|
## Data Anonymization
|
||||||
|
|
||||||
|
We suggest using Cloud Data Loss Prevention to identify/mask/tokenize your confidential data.
|
||||||
|
|
||||||
|
While implementing a Data Loss Prevention strategy is out of scope for this blueprint, we enable the service in two different projects so that [Cloud Data Loss Prevention templates](https://cloud.google.com/dlp/docs/concepts-templates) can be configured in one of two ways:
|
||||||
|
|
||||||
|
- during the ingestion phase, from Cloud Dataproc
|
||||||
|
- within the curated layer, in [BigQuery](https://cloud.google.com/bigquery/docs/scan-with-dlp) or [Cloud Dataproc](https://cloud.google.com/dataproc)
|
||||||
|
|
||||||
|
Cloud Data Loss Prevention resources and templates should be stored in the Common project:
|
||||||
|
|
||||||
|
![Centralized Cloud Data Loss Prevention high-level diagram](./images/dlp_diagram.png "Centralized Cloud Data Loss Prevention high-level diagram")
|
||||||
|
|
||||||
|
You can find more details and best practices on using DLP to De-identification and re-identification of PII in large-scale datasets in the [GCP documentation](https://cloud.google.com/architecture/de-identification-re-identification-pii-using-cloud-dlp).
|
||||||
|
|
||||||
|
## Data Catalog
|
||||||
|
|
||||||
|
[Data Catalog](https://cloud.google.com/data-catalog) helps you to document your data entry at scale. Data Catalog relies on [tags](https://cloud.google.com/data-catalog/docs/tags-and-tag-templates#tags) and [tag template](https://cloud.google.com/data-catalog/docs/tags-and-tag-templates#tag-templates) to manage metadata for all data entries in a unified and centralized service. To implement [column-level security](https://cloud.google.com/bigquery/docs/column-level-security-intro) on BigQuery, we suggest to use `Tags` and `Tag templates`.
|
||||||
|
|
||||||
|
The default configuration will implement 3 tags:
|
||||||
|
|
||||||
|
- `3_Confidential`: policy tag for columns that include very sensitive information, such as credit card numbers.
|
||||||
|
- `2_Private`: policy tag for columns that include sensitive personal identifiable information (PII) information, such as a person's first name.
|
||||||
|
- `1_Sensitive`: policy tag for columns that include data that cannot be made public, such as the credit limit.
|
||||||
|
|
||||||
|
Anything that is not tagged is available to all users who have access to the data warehouse.
|
||||||
|
|
||||||
|
For the purposes of this blueprint, no group has access to tagged data. You can configure your tags and the roles associated with them via the `data_catalog_tags` variable. We suggest using the "[Best practices for using policy tags in BigQuery](https://cloud.google.com/bigquery/docs/best-practices-policy-tags)" article as a guide to designing your tag structure and access patterns.
|
||||||
|
|
||||||
|
## How to run this script
|
||||||
|
|
||||||
|
To deploy this blueprint on your GCP organization, you will need
|
||||||
|
|
||||||
|
- a folder or organization where new projects will be created
|
||||||
|
- a billing account that will be associated with the new projects
|
||||||
|
|
||||||
|
The Data Platform is meant to be executed by a service account (or a regular user) having this minimal set of permissions:
|
||||||
|
|
||||||
|
- **Billing account**
|
||||||
|
- `roles/billing.user`
|
||||||
|
- **Folder level**:
|
||||||
|
- `roles/resourcemanager.folderAdmin`
|
||||||
|
- `roles/resourcemanager.projectCreator`
|
||||||
|
- **KMS Keys** (If CMEK encryption in use):
|
||||||
|
- `roles/cloudkms.admin` or a custom role with `cloudkms.cryptoKeys.getIamPolicy`, `cloudkms.cryptoKeys.list`, `cloudkms.cryptoKeys.setIamPolicy` permissions
|
||||||
|
- **Shared VPC host project** (if configured):\
|
||||||
|
- `roles/compute.xpnAdmin` on the host project folder or org
|
||||||
|
- `roles/resourcemanager.projectIamAdmin` on the host project, either with no conditions or with a condition allowing [delegated role grants](https://medium.com/google-cloud/managing-gcp-service-usage-through-delegated-role-grants-a843610f2226#:~:text=Delegated%20role%20grants%20is%20a,setIamPolicy%20permission%20on%20a%20resource.) for `roles/compute.networkUser`, `roles/composer.sharedVpcAgent`, `roles/container.hostServiceAgentUser`
|
||||||
|
|
||||||
|
## Variable configuration
|
||||||
|
|
||||||
|
There are three sets of variables you will need to fill in:
|
||||||
|
|
||||||
|
```tfvars
|
||||||
|
project_config = {
|
||||||
|
billing_account_id = "123456-123456-123456"
|
||||||
|
parent = "folders/12345678"
|
||||||
|
}
|
||||||
|
organization_domain = "domain.com"
|
||||||
|
prefix = "myprefix"
|
||||||
|
```
|
||||||
|
|
||||||
|
For more fine details check variables on [`variables.tf`](./variables.tf) and update according to the desired configuration.
|
||||||
|
|
||||||
|
*Remember* to create team groups described [below](#groups).
|
||||||
|
|
||||||
|
Once the configuration is complete, run the project factory by running
|
||||||
|
|
||||||
|
```bash
|
||||||
|
terraform init
|
||||||
|
terraform apply
|
||||||
|
```
|
||||||
|
|
||||||
|
## How to use this blueprint from Terraform
|
||||||
|
|
||||||
|
While this blueprint can be used as a standalone deployment, it can also be called directly as a Terraform module by providing the variable values as shown below:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
module "data-platform" {
|
||||||
|
source = "./fabric/blueprints/data-solutions/data-platform-minimal/"
|
||||||
|
organization_domain = "example.com"
|
||||||
|
project_config = {
|
||||||
|
billing_account_id = "123456-123456-123456"
|
||||||
|
parent = "folders/12345678"
|
||||||
|
}
|
||||||
|
prefix = "myprefix"
|
||||||
|
}
|
||||||
|
|
||||||
|
# tftest modules=21 resources=110
|
||||||
|
```
|
||||||
|
|
||||||
|
## Customizations
|
||||||
|
|
||||||
|
### Assign roles at BQ Dataset level
|
||||||
|
|
||||||
|
To handle multiple groups of `data-analysts` accessing the same Data Warehouse layer projects but only to the dataset belonging to a specific group, you may want to assign roles at BigQuery dataset level instead of at project-level.
|
||||||
|
To do this, you need to remove the IAM binding at project level for the `data-analysts` group and grant roles at BigQuery dataset level using the `iam` variable on the `bigquery-dataset` modules.
|
||||||
|
|
||||||
|
### Project Configuration
|
||||||
|
|
||||||
|
The solution can be deployed by creating projects on a given parent (organization or folder) or on existing projects. Configure variable `project_config` accordingly.
|
||||||
|
|
||||||
|
When you deploy the blueprint on existing projects, the blueprint is designed to rely on different projects configuring IAM binding with an additive approach.
|
||||||
|
|
||||||
|
Once you have identified the required project granularity for your use case, we suggest adapting the terraform script accordingly and relying on authoritative IAM binding.
|
||||||
|
|
||||||
|
### Shared VPC
|
||||||
|
|
||||||
|
To configure the use of a shared VPC, configure the `network_config`, example:
|
||||||
|
|
||||||
|
```tfvars
|
||||||
|
network_config = {
|
||||||
|
host_project = "PROJECT_ID"
|
||||||
|
network_self_link = "https://www.googleapis.com/compute/v1/projects/PROJECT_ID/global/networks/NAME"
|
||||||
|
subnet_self_links = {
|
||||||
|
processing_transformation = "https://www.googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/NAME"
|
||||||
|
processing_composer = "https://www.googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/NAME"
|
||||||
|
}
|
||||||
|
composer_ip_ranges = {
|
||||||
|
cloudsql = "192.168.XXX.XXX/24"
|
||||||
|
gke_master = "192.168.XXX.XXX/28"
|
||||||
|
}
|
||||||
|
composer_secondary_ranges = {
|
||||||
|
pods = "pods"
|
||||||
|
services = "services"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Customer Managed Encryption key
|
||||||
|
|
||||||
|
To configure the use of Cloud KMS on resources, configure the `service_encryption_keys` variable. Key locations should match resource locations. Example:
|
||||||
|
|
||||||
|
```tfvars
|
||||||
|
service_encryption_keys = {
|
||||||
|
bq = "KEY_URL"
|
||||||
|
composer = "KEY_URL"
|
||||||
|
compute = "KEY_URL"
|
||||||
|
storage = "KEY_URL"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Demo pipeline
|
||||||
|
|
||||||
|
The application layer is out of scope of this script. For demo purposes only, one Cloud Composer DAG is provided to document how to deploy a Cloud Dataproc Serverless job on the architecture. You can find examples in the [demo](./demo) folder.
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
| name | description | modules | resources |
|
||||||
|
|---|---|---|---|
|
||||||
|
| [01-landing.tf](./01-landing.tf) | Landing project and resources. | <code>gcs</code> · <code>iam-service-account</code> · <code>project</code> | |
|
||||||
|
| [02-composer.tf](./02-composer.tf) | Cloud Composer resources. | <code>iam-service-account</code> | <code>google_composer_environment</code> |
|
||||||
|
| [02-dataproc.tf](./02-dataproc.tf) | Cloud Dataproc resources. | <code>dataproc</code> · <code>gcs</code> · <code>iam-service-account</code> | |
|
||||||
|
| [02-processing.tf](./02-processing.tf) | Processing project and VPC. | <code>gcs</code> · <code>net-cloudnat</code> · <code>net-vpc</code> · <code>net-vpc-firewall</code> · <code>project</code> | |
|
||||||
|
| [03-curated.tf](./03-curated.tf) | Data curated project and resources. | <code>bigquery-dataset</code> · <code>gcs</code> · <code>project</code> | |
|
||||||
|
| [04-common.tf](./04-common.tf) | Common project and resources. | <code>data-catalog-policy-tag</code> · <code>project</code> | |
|
||||||
|
| [main.tf](./main.tf) | Core locals. | | <code>google_project_iam_member</code> |
|
||||||
|
| [outputs.tf](./outputs.tf) | Output variables. | | |
|
||||||
|
| [variables.tf](./variables.tf) | Terraform Variables. | | |
|
||||||
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
|
## Variables
|
||||||
|
|
||||||
|
| name | description | type | required | default |
|
||||||
|
|---|---|:---:|:---:|:---:|
|
||||||
|
| [organization_domain](variables.tf#L122) | Organization domain. | <code>string</code> | ✓ | |
|
||||||
|
| [prefix](variables.tf#L127) | Prefix used for resource names. | <code>string</code> | ✓ | |
|
||||||
|
| [project_config](variables.tf#L136) | Provide 'billing_account_id' value if project creation is needed, uses existing 'project_ids' if null. Parent is in 'folders/nnn' or 'organizations/nnn' format. | <code title="object({ billing_account_id = optional(string, null) parent = string project_ids = optional(object({ landing = string processing = string curated = string common = string }), { landing = "lnd" processing = "prc" curated = "cur" common = "cmn" } ) })">object({…})</code> | ✓ | |
|
||||||
|
| [composer_config](variables.tf#L17) | Cloud Composer config. | <code title="object({ environment_size = optional(string, "ENVIRONMENT_SIZE_SMALL") software_config = optional(object({ airflow_config_overrides = optional(map(string), {}) pypi_packages = optional(map(string), {}) env_variables = optional(map(string), {}) image_version = optional(string, "composer-2-airflow-2") }), {}) workloads_config = optional(object({ scheduler = optional(object({ cpu = optional(number, 0.5) memory_gb = optional(number, 1.875) storage_gb = optional(number, 1) count = optional(number, 1) } ), {}) web_server = optional(object({ cpu = optional(number, 0.5) memory_gb = optional(number, 1.875) storage_gb = optional(number, 1) }), {}) worker = optional(object({ cpu = optional(number, 0.5) memory_gb = optional(number, 1.875) storage_gb = optional(number, 1) min_count = optional(number, 1) max_count = optional(number, 3) } ), {}) }), {}) })">object({…})</code> | | <code>{}</code> |
|
||||||
|
| [data_catalog_tags](variables.tf#L54) | List of Data Catalog Policy tags to be created with optional IAM binding configuration in {tag => {ROLE => [MEMBERS]}} format. | <code>map(map(list(string)))</code> | | <code title="{ "3_Confidential" = null "2_Private" = null "1_Sensitive" = null }">{…}</code> |
|
||||||
|
| [data_force_destroy](variables.tf#L65) | Flag to set 'force_destroy' on data services like BigQuery or Cloud Storage. | <code>bool</code> | | <code>false</code> |
|
||||||
|
| [enable_services](variables.tf#L71) | Flag to enable or disable services in the Data Platform. | <code title="object({ composer = optional(bool, true) dataproc_history_server = optional(bool, true) })">object({…})</code> | | <code>{}</code> |
|
||||||
|
| [groups](variables.tf#L80) | User groups. | <code>map(string)</code> | | <code title="{ data-analysts = "gcp-data-analysts" data-engineers = "gcp-data-engineers" data-security = "gcp-data-security" }">{…}</code> |
|
||||||
|
| [location](variables.tf#L90) | Location used for multi-regional resources. | <code>string</code> | | <code>"eu"</code> |
|
||||||
|
| [network_config](variables.tf#L96) | Shared VPC network configurations to use. If null networks will be created in projects. | <code title="object({ host_project = optional(string) network_self_link = optional(string) subnet_self_links = optional(object({ processing_transformation = string processing_composer = string }), null) composer_ip_ranges = optional(object({ connection_subnetwork = optional(string) cloud_sql = optional(string, "10.20.10.0/24") gke_master = optional(string, "10.20.11.0/28") pods_range_name = optional(string, "pods") services_range_name = optional(string, "services") }), {}) })">object({…})</code> | | <code>{}</code> |
|
||||||
|
| [project_suffix](variables.tf#L160) | Suffix used only for project ids. | <code>string</code> | | <code>null</code> |
|
||||||
|
| [region](variables.tf#L166) | Region used for regional resources. | <code>string</code> | | <code>"europe-west1"</code> |
|
||||||
|
| [service_encryption_keys](variables.tf#L172) | Cloud KMS to use to encrypt different services. Key location should match service region. | <code title="object({ bq = optional(string) composer = optional(string) compute = optional(string) storage = optional(string) })">object({…})</code> | | <code>{}</code> |
|
||||||
|
|
||||||
|
## Outputs
|
||||||
|
|
||||||
|
| name | description | sensitive |
|
||||||
|
|---|---|:---:|
|
||||||
|
| [bigquery-datasets](outputs.tf#L17) | BigQuery datasets. | |
|
||||||
|
| [dataproc-history-server](outputs.tf#L24) | List of bucket names which have been assigned to the cluster. | |
|
||||||
|
| [gcs-buckets](outputs.tf#L29) | GCS buckets. | ✓ |
|
||||||
|
| [kms_keys](outputs.tf#L39) | Cloud KMS keys. | |
|
||||||
|
| [projects](outputs.tf#L44) | GCP projects information. | |
|
||||||
|
| [vpc_network](outputs.tf#L62) | VPC network. | |
|
||||||
|
| [vpc_subnet](outputs.tf#L70) | VPC subnetworks. | |
|
||||||
|
|
||||||
|
<!-- END TFDOC -->
|
|
@ -0,0 +1,93 @@
|
||||||
|
#!/usr/bin/env python

# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Demo Airflow DAG: create, list and get a Dataproc Serverless batch job.

The PySpark job runs as the processing service account inside the processing
subnet, and logs to the Persistent History Server cluster. All configuration
is read from Composer environment variables injected by the Terraform
blueprint.
"""

import datetime
import time
import os

from airflow import models
from airflow.providers.google.cloud.operators.dataproc import (
    DataprocCreateBatchOperator, DataprocDeleteBatchOperator,
    DataprocGetBatchOperator, DataprocListBatchesOperator
)
from airflow.utils.dates import days_ago

# --------------------------------------------------------------------------------
# Get variables
# --------------------------------------------------------------------------------
BQ_LOCATION = os.environ.get("BQ_LOCATION")
CURATED_BQ_DATASET = os.environ.get("CURATED_BQ_DATASET")
CURATED_GCS = os.environ.get("CURATED_GCS")
CURATED_PRJ = os.environ.get("CURATED_PRJ")
DP_KMS_KEY = os.environ.get("DP_KMS_KEY", "")
DP_REGION = os.environ.get("DP_REGION")
GCP_REGION = os.environ.get("GCP_REGION")
LAND_PRJ = os.environ.get("LAND_PRJ")
LAND_BQ_DATASET = os.environ.get("LAND_BQ_DATASET")
LAND_GCS = os.environ.get("LAND_GCS")
PHS_CLUSTER_NAME = os.environ.get("PHS_CLUSTER_NAME")
PROCESSING_GCS = os.environ.get("PROCESSING_GCS")
PROCESSING_PRJ = os.environ.get("PROCESSING_PRJ")
PROCESSING_SA = os.environ.get("PROCESSING_SA")
PROCESSING_SUBNET = os.environ.get("PROCESSING_SUBNET")
PROCESSING_VPC = os.environ.get("PROCESSING_VPC")

PYTHON_FILE_LOCATION = "gs://"+PROCESSING_GCS+"/pyspark_sort.py"
PHS_CLUSTER_PATH = "projects/"+PROCESSING_PRJ+"/regions/"+DP_REGION+"/clusters/"+PHS_CLUSTER_NAME

# Compute the batch id once so the create and get tasks refer to the SAME
# batch. The original hard-coded "batch-create-phs" in the get task, which
# could never match the timestamped id used at creation time.
BATCH_ID = "batch-create-phs-" + str(int(time.time()))

default_args = {
    # Tell airflow to start one day ago, so that it runs as soon as you upload it
    "start_date": days_ago(1),
    # Default region applied to every Dataproc operator in this DAG.
    "region": DP_REGION,
}
with models.DAG(
    "dataproc_batch_operators",  # The id you will see in the DAG airflow page
    default_args=default_args,
    schedule_interval=None,  # The interval with which to schedule the DAG; override to match your needs
) as dag:

    # Submit the PySpark script as a Dataproc Serverless batch, attached to
    # the Persistent History Server cluster for Spark UI access.
    create_batch = DataprocCreateBatchOperator(
        task_id="batch_create",
        project_id=PROCESSING_PRJ,
        batch={
            "environment_config": {
                "execution_config": {
                    "service_account": PROCESSING_SA,
                    "subnetwork_uri": PROCESSING_SUBNET
                },
                "peripherals_config": {
                    "spark_history_server_config": {
                        "dataproc_cluster": PHS_CLUSTER_PATH
                    }
                }
            },
            "pyspark_batch": {
                "main_python_file_uri": PYTHON_FILE_LOCATION,
            }
        },
        batch_id=BATCH_ID,
    )

    # List all batches in the project/region.
    list_batches = DataprocListBatchesOperator(
        task_id="list-all-batches",
    )

    # Fetch the batch created above (same BATCH_ID as batch_create).
    get_batch = DataprocGetBatchOperator(
        task_id="get_batch",
        batch_id=BATCH_ID,
    )

    create_batch >> list_batches >> get_batch
|
|
@ -0,0 +1,30 @@
|
||||||
|
#!/usr/bin/env python

# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Sample pyspark script to be uploaded to Cloud Storage and run on
Cloud Dataproc.

Note this file is not intended to be run directly, but run inside a PySpark
environment.
"""

# [START dataproc_pyspark_sort]
import pyspark

# Distribute a small list of words, collect it back on the driver, and
# print the words in sorted order.
spark_context = pyspark.SparkContext()
word_rdd = spark_context.parallelize(
    ["Hello,", "world!", "dog", "elephant", "panther"])
sorted_words = sorted(word_rdd.collect())
print(sorted_words)
# [END dataproc_pyspark_sort]
|
Binary file not shown.
After Width: | Height: | Size: 102 KiB |
Binary file not shown.
After Width: | Height: | Size: 45 KiB |
Binary file not shown.
After Width: | Height: | Size: 54 KiB |
|
@ -0,0 +1,26 @@
|
||||||
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tfdoc:file:description Core locals.

locals {
  # Fully qualified group e-mail addresses, e.g. "gcp-data-engineers@example.com".
  groups = {
    for key, name in var.groups : key => format("%s@%s", name, var.organization_domain)
  }
  # The same addresses in IAM member format ("group:<email>").
  groups_iam = {
    for key, address in local.groups : key => format("group:%s", address)
  }
  # Optional "-<suffix>" appended to project ids only; empty when unset.
  project_suffix = var.project_suffix == null ? "" : format("-%s", var.project_suffix)
  # A configured host project signals use of an existing Shared VPC.
  use_shared_vpc = var.network_config.host_project != null
}
|
|
@ -0,0 +1,76 @@
|
||||||
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tfdoc:file:description Output variables.

output "bigquery-datasets" {
  description = "BigQuery datasets."
  value = {
    curated = module.cur-bq-0.dataset_id
  }
}

output "dataproc-history-server" {
  description = "List of bucket names which have been assigned to the cluster."
  value       = one(module.processing-dp-historyserver)
}

# Marked sensitive because bucket module outputs may expose key/IAM details.
output "gcs-buckets" {
  description = "GCS buckets."
  sensitive   = true
  value = {
    landing-cs-0    = module.land-sa-cs-0
    processing-cs-0 = module.processing-cs-0
    cur-cs-0        = module.cur-cs-0
  }
}

output "kms_keys" {
  # Fixed description typo: "MKS" -> "KMS".
  description = "Cloud KMS keys."
  value       = var.service_encryption_keys
}

output "projects" {
  # Fixed description typo: "Projects informations" -> "projects information".
  description = "GCP projects information."
  value = {
    project_number = {
      landing    = module.land-project.number
      common     = module.common-project.number
      curated    = module.cur-project.number
      processing = module.processing-project.number
    }
    project_id = {
      landing    = module.land-project.project_id
      common     = module.common-project.project_id
      curated    = module.cur-project.project_id
      processing = module.processing-project.project_id
    }
  }
}

output "vpc_network" {
  description = "VPC network."
  value = {
    processing_transformation = local.processing_vpc
    processing_composer       = local.processing_vpc
  }
}

output "vpc_subnet" {
  description = "VPC subnetworks."
  value = {
    processing_transformation = local.processing_subnet
    processing_composer       = local.processing_subnet
  }
}
|
|
@ -0,0 +1,182 @@
|
||||||
|
# Copyright 2022 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# tfdoc:file:description Terraform Variables.
|
||||||
|
|
||||||
|
# Sizing and software settings for the Cloud Composer 2 environment; every
# attribute is optional and defaults to a small environment.
variable "composer_config" {
  description = "Cloud Composer config."
  type = object({
    environment_size = optional(string, "ENVIRONMENT_SIZE_SMALL")
    software_config = optional(object({
      airflow_config_overrides = optional(map(string), {})
      pypi_packages            = optional(map(string), {})
      env_variables            = optional(map(string), {})
      image_version            = optional(string, "composer-2-airflow-2")
    }), {})
    workloads_config = optional(object({
      scheduler = optional(object({
        cpu        = optional(number, 0.5)
        memory_gb  = optional(number, 1.875)
        storage_gb = optional(number, 1)
        count      = optional(number, 1)
        }
      ), {})
      web_server = optional(object({
        cpu        = optional(number, 0.5)
        memory_gb  = optional(number, 1.875)
        storage_gb = optional(number, 1)
      }), {})
      worker = optional(object({
        cpu        = optional(number, 0.5)
        memory_gb  = optional(number, 1.875)
        storage_gb = optional(number, 1)
        min_count  = optional(number, 1)
        max_count  = optional(number, 3)
        }
      ), {})
    }), {})
  })
  nullable = false
  default  = {}
}
|
||||||
|
|
||||||
|
# Data Catalog policy tags to create in the common project; each tag may
# carry an optional IAM binding map, or null for no bindings.
variable "data_catalog_tags" {
  # Fixed description typo: "binging" -> "binding".
  description = "List of Data Catalog Policy tags to be created with optional IAM binding configuration in {tag => {ROLE => [MEMBERS]}} format."
  type        = map(map(list(string)))
  nullable    = false
  default = {
    "3_Confidential" = null
    "2_Private"      = null
    "1_Sensitive"    = null
  }
}
|
||||||
|
|
||||||
|
# Controls whether data-bearing resources can be destroyed together with
# the data they hold; keep false for production deployments.
variable "data_force_destroy" {
  # Fixed description typo: "BiguQery" -> "BigQuery".
  description = "Flag to set 'force_destroy' on data services like BigQuery or Cloud Storage."
  type        = bool
  default     = false
}
|
||||||
|
|
||||||
|
# Feature toggles for optional Data Platform components.
variable "enable_services" {
  description = "Flag to enable or disable services in the Data Platform."
  type = object({
    composer                = optional(bool, true)
    dataproc_history_server = optional(bool, true)
  })
  default = {}
}
|
||||||
|
|
||||||
|
# Group names (local part only); the organization domain is appended
# elsewhere to build full e-mail addresses.
variable "groups" {
  description = "User groups."
  type        = map(string)
  default = {
    data-analysts  = "gcp-data-analysts"
    data-engineers = "gcp-data-engineers"
    data-security  = "gcp-data-security"
  }
}
|
||||||
|
|
||||||
|
# Multi-regional location (e.g. "eu", "us") for multi-regional resources.
variable "location" {
  description = "Location used for multi-regional resources."
  type        = string
  default     = "eu"
}
|
||||||
|
|
||||||
|
# Existing Shared VPC configuration; keep the default to have VPCs created
# inside the blueprint projects instead.
variable "network_config" {
  description = "Shared VPC network configurations to use. If null networks will be created in projects."
  type = object({
    host_project      = optional(string)
    network_self_link = optional(string)
    subnet_self_links = optional(object({
      processing_transformation = string
      processing_composer       = string
    }), null)
    composer_ip_ranges = optional(object({
      connection_subnetwork = optional(string)
      cloud_sql             = optional(string, "10.20.10.0/24")
      gke_master            = optional(string, "10.20.11.0/28")
      pods_range_name       = optional(string, "pods")
      services_range_name   = optional(string, "services")
    }), {})
    # web_server_network_access_control = list(string)
  })
  nullable = false
  default  = {}
  validation {
    # Composer needs exactly one way to reach Cloud SQL: either an IP range
    # or a connection subnetwork, never both or neither.
    condition     = (var.network_config.composer_ip_ranges.cloud_sql == null) != (var.network_config.composer_ip_ranges.connection_subnetwork == null)
    error_message = "One, and only one, of `network_config.composer_ip_ranges.cloud_sql` or `network_config.composer_ip_ranges.connection_subnetwork` must be specified."
  }
}
|
||||||
|
|
||||||
|
# Domain appended to group names to build IAM member e-mail addresses.
variable "organization_domain" {
  description = "Organization domain."
  type        = string
}
|
||||||
|
|
||||||
|
# Short prefix prepended to all resource names to guarantee uniqueness.
variable "prefix" {
  description = "Prefix used for resource names."
  type        = string
  validation {
    condition     = var.prefix != ""
    error_message = "Prefix cannot be empty."
  }
}
|
||||||
|
|
||||||
|
# Either project creation settings (billing account + parent) or existing
# project ids to deploy into; at least one of the two must be supplied.
variable "project_config" {
  description = "Provide 'billing_account_id' value if project creation is needed, uses existing 'project_ids' if null. Parent is in 'folders/nnn' or 'organizations/nnn' format."
  type = object({
    billing_account_id = optional(string, null)
    parent             = string
    project_ids = optional(object({
      landing    = string
      processing = string
      curated    = string
      common     = string
      }), {
      landing    = "lnd"
      processing = "prc"
      curated    = "cur"
      common     = "cmn"
      }
    )
  })
  validation {
    condition     = var.project_config.billing_account_id != null || var.project_config.project_ids != null
    error_message = "At least one of project_config.billing_account_id or var.project_config.project_ids should be set."
  }
}
|
||||||
|
|
||||||
|
# Optional suffix appended to project ids only (not to other resources).
variable "project_suffix" {
  description = "Suffix used only for project ids."
  type        = string
  default     = null
}
|
||||||
|
|
||||||
|
# Region for regional resources (networking, Dataproc, Composer).
variable "region" {
  description = "Region used for regional resources."
  type        = string
  default     = "europe-west1"
}
|
||||||
|
|
||||||
|
# CMEK key ids per service; unset entries fall back to Google-managed keys.
variable "service_encryption_keys" {
  description = "Cloud KMS to use to encrypt different services. Key location should match service region."
  type = object({
    bq       = optional(string)
    composer = optional(string)
    compute  = optional(string)
    storage  = optional(string)
  })
  nullable = false
  default  = {}
}
|
|
@ -22,7 +22,7 @@ As is often the case in real-world configurations, this blueprint accepts as inp
|
||||||
|
|
||||||
If the network_config variable is not provided, one VPC will be created in each project that supports network resources (load, transformation and orchestration).
|
If the network_config variable is not provided, one VPC will be created in each project that supports network resources (load, transformation and orchestration).
|
||||||
|
|
||||||
## Deploy your enviroment
|
## Deploy your environment
|
||||||
|
|
||||||
We assume the identiy running the following steps has the following role:
|
We assume the identiy running the following steps has the following role:
|
||||||
|
|
||||||
|
@ -35,7 +35,7 @@ Run Terraform init:
|
||||||
terraform init
|
terraform init
|
||||||
```
|
```
|
||||||
|
|
||||||
Configure the Terraform variable in your terraform.tfvars file. You need to spefify at least the following variables:
|
Configure the Terraform variable in your terraform.tfvars file. You need to specify at least the following variables:
|
||||||
|
|
||||||
```
|
```
|
||||||
prefix = "prefix"
|
prefix = "prefix"
|
||||||
|
@ -48,7 +48,7 @@ You can run now:
|
||||||
terraform apply
|
terraform apply
|
||||||
```
|
```
|
||||||
|
|
||||||
You can now connect to the Vertex AI notbook to perform your data analysy.
|
You can now connect to the Vertex AI notbook to perform your data analysis.
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
@ -86,5 +86,5 @@ module "test" {
|
||||||
parent = "folders/467898377"
|
parent = "folders/467898377"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
# tftest modules=8 resources=40
|
# tftest modules=8 resources=41
|
||||||
```
|
```
|
||||||
|
|
|
@ -58,7 +58,7 @@ variable "region" {
|
||||||
default = "europe-west1"
|
default = "europe-west1"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "service_encryption_keys" { # service encription key
|
variable "service_encryption_keys" { # service encryption key
|
||||||
description = "Cloud KMS to use to encrypt different services. Key location should match service region."
|
description = "Cloud KMS to use to encrypt different services. Key location should match service region."
|
||||||
type = object({
|
type = object({
|
||||||
bq = string
|
bq = string
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,7 +30,7 @@ The main components that we would be setting up are (to learn more about these p
|
||||||
* [Cloud Storage (GCS) bucket](https://cloud.google.com/storage/): data lake solution to store extracted raw data that must undergo some kind of transformation.
|
* [Cloud Storage (GCS) bucket](https://cloud.google.com/storage/): data lake solution to store extracted raw data that must undergo some kind of transformation.
|
||||||
* [Cloud Dataflow pipeline](https://cloud.google.com/dataflow): to build fully managed batch and streaming pipelines to transform data stored in GCS buckets ready for processing in the Data Warehouse using Apache Beam.
|
* [Cloud Dataflow pipeline](https://cloud.google.com/dataflow): to build fully managed batch and streaming pipelines to transform data stored in GCS buckets ready for processing in the Data Warehouse using Apache Beam.
|
||||||
* [BigQuery datasets and tables](https://cloud.google.com/bigquery): to store the transformed data in and query it using SQL, use it to make reports or begin training [machine learning](https://cloud.google.com/bigquery-ml/docs/introduction) models without having to take your data out.
|
* [BigQuery datasets and tables](https://cloud.google.com/bigquery): to store the transformed data in and query it using SQL, use it to make reports or begin training [machine learning](https://cloud.google.com/bigquery-ml/docs/introduction) models without having to take your data out.
|
||||||
* [Service accounts](https://cloud.google.com/iam/docs/service-accounts) (__created with least privilege on each resource__): one for uploading data into the GCS bucket, one for Orchestration, one for Dataflow instances and one for the BigQuery tables. You can also configure users or groups of users to assign them a viewer role on the created resources and the ability to impersonate service accounts to test the Dataflow pipelines before automating them with a tool like [Cloud Composer](https://cloud.google.com/composer).
|
* [Service accounts](https://cloud.google.com/iam/docs/service-account-overview) (__created with least privilege on each resource__): one for uploading data into the GCS bucket, one for Orchestration, one for Dataflow instances and one for the BigQuery tables. You can also configure users or groups of users to assign them a viewer role on the created resources and the ability to impersonate service accounts to test the Dataflow pipelines before automating them with a tool like [Cloud Composer](https://cloud.google.com/composer).
|
||||||
|
|
||||||
For a full list of the resources that will be created, please refer to the [github repository](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/blueprints/data-solutions/gcs-to-bq-with-least-privileges) for this project. If you're migrating from another Cloud Provider, refer to [this](https://cloud.google.com/free/docs/aws-azure-gcp-service-comparison) documentation to see equivalent services and comparisons in Microsoft Azure and Amazon Web Services
|
For a full list of the resources that will be created, please refer to the [github repository](https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/tree/master/blueprints/data-solutions/gcs-to-bq-with-least-privileges) for this project. If you're migrating from another Cloud Provider, refer to [this](https://cloud.google.com/free/docs/aws-azure-gcp-service-comparison) documentation to see equivalent services and comparisons in Microsoft Azure and Amazon Web Services
|
||||||
|
|
||||||
|
|
|
@ -13,19 +13,15 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 1.3.1"
|
required_version = ">= 1.4.4"
|
||||||
required_providers {
|
required_providers {
|
||||||
google = {
|
google = {
|
||||||
source = "hashicorp/google"
|
source = "hashicorp/google"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
}
|
||||||
google-beta = {
|
google-beta = {
|
||||||
source = "hashicorp/google-beta"
|
source = "hashicorp/google-beta"
|
||||||
version = ">= 4.55.0" # tftest
|
version = ">= 4.60.0" # tftest
|
||||||
}
|
|
||||||
local = {
|
|
||||||
source = "hashicorp/local"
|
|
||||||
version = "2.2.3"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -112,7 +112,7 @@ The Shielded Folder blueprint is meant to be executed by a Service Account (or a
|
||||||
- `roles/resourcemanager.folderAdmin`
|
- `roles/resourcemanager.folderAdmin`
|
||||||
- `roles/resourcemanager.projectCreator`
|
- `roles/resourcemanager.projectCreator`
|
||||||
|
|
||||||
The shielded Folfer blueprint assumes [groups described](#user-groups) are created in your GCP organization.
|
The shielded Folder blueprint assumes [groups described](#user-groups) are created in your GCP organization.
|
||||||
|
|
||||||
### Variable configuration PIPPO
|
### Variable configuration PIPPO
|
||||||
|
|
||||||
|
@ -167,8 +167,8 @@ terraform apply
|
||||||
| [log_locations](variables.tf#L86) | Optional locations for GCS, BigQuery, and logging buckets created here. | <code title="object({ bq = optional(string, "europe") storage = optional(string, "europe") logging = optional(string, "global") pubsub = optional(string, "global") })">object({…})</code> | | <code title="{ bq = "europe" storage = "europe" logging = "global" pubsub = null }">{…}</code> |
|
| [log_locations](variables.tf#L86) | Optional locations for GCS, BigQuery, and logging buckets created here. | <code title="object({ bq = optional(string, "europe") storage = optional(string, "europe") logging = optional(string, "global") pubsub = optional(string, "global") })">object({…})</code> | | <code title="{ bq = "europe" storage = "europe" logging = "global" pubsub = null }">{…}</code> |
|
||||||
| [log_sinks](variables.tf#L103) | Org-level log sinks, in name => {type, filter} format. | <code title="map(object({ filter = string type = string }))">map(object({…}))</code> | | <code title="{ audit-logs = { filter = "logName:\"/logs/cloudaudit.googleapis.com%2Factivity\" OR logName:\"/logs/cloudaudit.googleapis.com%2Fsystem_event\"" type = "bigquery" } vpc-sc = { filter = "protoPayload.metadata.@type=\"type.googleapis.com/google.cloud.audit.VpcServiceControlAuditMetadata\"" type = "bigquery" } }">{…}</code> |
|
| [log_sinks](variables.tf#L103) | Org-level log sinks, in name => {type, filter} format. | <code title="map(object({ filter = string type = string }))">map(object({…}))</code> | | <code title="{ audit-logs = { filter = "logName:\"/logs/cloudaudit.googleapis.com%2Factivity\" OR logName:\"/logs/cloudaudit.googleapis.com%2Fsystem_event\"" type = "bigquery" } vpc-sc = { filter = "protoPayload.metadata.@type=\"type.googleapis.com/google.cloud.audit.VpcServiceControlAuditMetadata\"" type = "bigquery" } }">{…}</code> |
|
||||||
| [vpc_sc_access_levels](variables.tf#L161) | VPC SC access level definitions. | <code title="map(object({ combining_function = optional(string) conditions = optional(list(object({ device_policy = optional(object({ allowed_device_management_levels = optional(list(string)) allowed_encryption_statuses = optional(list(string)) require_admin_approval = bool require_corp_owned = bool require_screen_lock = optional(bool) os_constraints = optional(list(object({ os_type = string minimum_version = optional(string) require_verified_chrome_os = optional(bool) }))) })) ip_subnetworks = optional(list(string), []) members = optional(list(string), []) negate = optional(bool) regions = optional(list(string), []) required_access_levels = optional(list(string), []) })), []) description = optional(string) }))">map(object({…}))</code> | | <code>{}</code> |
|
| [vpc_sc_access_levels](variables.tf#L161) | VPC SC access level definitions. | <code title="map(object({ combining_function = optional(string) conditions = optional(list(object({ device_policy = optional(object({ allowed_device_management_levels = optional(list(string)) allowed_encryption_statuses = optional(list(string)) require_admin_approval = bool require_corp_owned = bool require_screen_lock = optional(bool) os_constraints = optional(list(object({ os_type = string minimum_version = optional(string) require_verified_chrome_os = optional(bool) }))) })) ip_subnetworks = optional(list(string), []) members = optional(list(string), []) negate = optional(bool) regions = optional(list(string), []) required_access_levels = optional(list(string), []) })), []) description = optional(string) }))">map(object({…}))</code> | | <code>{}</code> |
|
||||||
| [vpc_sc_egress_policies](variables.tf#L190) | VPC SC egress policy defnitions. | <code title="map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") identities = optional(list(string)) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) resource_type_external = optional(bool, false) }) }))">map(object({…}))</code> | | <code>{}</code> |
|
| [vpc_sc_egress_policies](variables.tf#L190) | VPC SC egress policy definitions. | <code title="map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") identities = optional(list(string)) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) resource_type_external = optional(bool, false) }) }))">map(object({…}))</code> | | <code>{}</code> |
|
||||||
| [vpc_sc_ingress_policies](variables.tf#L210) | VPC SC ingress policy defnitions. | <code title="map(object({ from = object({ access_levels = optional(list(string), []) identity_type = optional(string) identities = optional(list(string)) resources = optional(list(string), []) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) }) }))">map(object({…}))</code> | | <code>{}</code> |
|
| [vpc_sc_ingress_policies](variables.tf#L210) | VPC SC ingress policy definitions. | <code title="map(object({ from = object({ access_levels = optional(list(string), []) identity_type = optional(string) identities = optional(list(string)) resources = optional(list(string), []) }) to = object({ operations = optional(list(object({ method_selectors = optional(list(string)) service_name = string })), []) resources = optional(list(string)) }) }))">map(object({…}))</code> | | <code>{}</code> |
|
||||||
|
|
||||||
## Outputs
|
## Outputs
|
||||||
|
|
||||||
|
@ -176,9 +176,9 @@ terraform apply
|
||||||
|---|---|:---:|
|
|---|---|:---:|
|
||||||
| [folders](outputs.tf#L15) | Folders id. | |
|
| [folders](outputs.tf#L15) | Folders id. | |
|
||||||
| [folders_sink_writer_identities](outputs.tf#L23) | Folders id. | |
|
| [folders_sink_writer_identities](outputs.tf#L23) | Folders id. | |
|
||||||
|
| [kms_keys](outputs.tf#L31) | Cloud KMS encryption keys created. | |
|
||||||
|
|
||||||
<!-- END TFDOC -->
|
<!-- END TFDOC -->
|
||||||
|
|
||||||
## Test
|
## Test
|
||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
|
|
|
@ -81,7 +81,7 @@ module "sec-kms" {
|
||||||
project_id = module.sec-project[0].project_id
|
project_id = module.sec-project[0].project_id
|
||||||
keyring = {
|
keyring = {
|
||||||
location = each.key
|
location = each.key
|
||||||
name = "${each.key}"
|
name = "sec-${each.key}"
|
||||||
}
|
}
|
||||||
# rename to `key_iam` to switch to authoritative bindings
|
# rename to `key_iam` to switch to authoritative bindings
|
||||||
key_iam_additive = {
|
key_iam_additive = {
|
||||||
|
@ -96,7 +96,7 @@ module "log-kms" {
|
||||||
project_id = module.sec-project[0].project_id
|
project_id = module.sec-project[0].project_id
|
||||||
keyring = {
|
keyring = {
|
||||||
location = each.key
|
location = each.key
|
||||||
name = "${each.key}"
|
name = "log-${each.key}"
|
||||||
}
|
}
|
||||||
keys = local.kms_log_locations_keys[each.key]
|
keys = local.kms_log_locations_keys[each.key]
|
||||||
}
|
}
|
||||||
|
|
|
@ -73,7 +73,7 @@ module "log-export-dataset" {
|
||||||
id = "${var.prefix}_audit_export"
|
id = "${var.prefix}_audit_export"
|
||||||
friendly_name = "Audit logs export."
|
friendly_name = "Audit logs export."
|
||||||
location = replace(var.log_locations.bq, "europe", "EU")
|
location = replace(var.log_locations.bq, "europe", "EU")
|
||||||
encryption_key = var.enable_features.encryption ? module.log-kms[var.log_locations.bq].keys["bq"].id : false
|
encryption_key = var.enable_features.encryption ? module.log-kms[var.log_locations.bq].keys["bq"].id : null
|
||||||
}
|
}
|
||||||
|
|
||||||
module "log-export-gcs" {
|
module "log-export-gcs" {
|
||||||
|
|
|
@ -28,3 +28,7 @@ output "folders_sink_writer_identities" {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "kms_keys" {
|
||||||
|
description = "Cloud KMS encryption keys created."
|
||||||
|
value = { for k, v in module.sec-kms : k => v.key_ids }
|
||||||
|
}
|
||||||
|
|
|
@ -188,7 +188,7 @@ variable "vpc_sc_access_levels" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "vpc_sc_egress_policies" {
|
variable "vpc_sc_egress_policies" {
|
||||||
description = "VPC SC egress policy defnitions."
|
description = "VPC SC egress policy definitions."
|
||||||
type = map(object({
|
type = map(object({
|
||||||
from = object({
|
from = object({
|
||||||
identity_type = optional(string, "ANY_IDENTITY")
|
identity_type = optional(string, "ANY_IDENTITY")
|
||||||
|
@ -208,7 +208,7 @@ variable "vpc_sc_egress_policies" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "vpc_sc_ingress_policies" {
|
variable "vpc_sc_ingress_policies" {
|
||||||
description = "VPC SC ingress policy defnitions."
|
description = "VPC SC ingress policy definitions."
|
||||||
type = map(object({
|
type = map(object({
|
||||||
from = object({
|
from = object({
|
||||||
access_levels = optional(list(string), [])
|
access_levels = optional(list(string), [])
|
||||||
|
|
|
@ -1,24 +1,30 @@
|
||||||
# MLOps with Vertex AI
|
# MLOps with Vertex AI
|
||||||
|
|
||||||
## Introduction
|
## Tagline
|
||||||
|
|
||||||
|
Create a Vertex AI environment needed for MLOps.
|
||||||
|
|
||||||
|
## Detailed
|
||||||
|
|
||||||
This example implements the infrastructure required to deploy an end-to-end [MLOps process](https://services.google.com/fh/files/misc/practitioners_guide_to_mlops_whitepaper.pdf) using [Vertex AI](https://cloud.google.com/vertex-ai) platform.
|
This example implements the infrastructure required to deploy an end-to-end [MLOps process](https://services.google.com/fh/files/misc/practitioners_guide_to_mlops_whitepaper.pdf) using [Vertex AI](https://cloud.google.com/vertex-ai) platform.
|
||||||
|
|
||||||
## GCP resources
|
## Architecture
|
||||||
|
|
||||||
The blueprint will deploy all the required resources to have a fully functional MLOPs environment containing:
|
The blueprint will deploy all the required resources to have a fully functional MLOPs environment containing:
|
||||||
|
|
||||||
- Vertex Workbench (for the experimentation environment)
|
1. Vertex Workbench (for the experimentation environment).
|
||||||
- GCP Project (optional) to host all the resources
|
1. GCP Project (optional) to host all the resources.
|
||||||
- Isolated VPC network and a subnet to be used by Vertex and Dataflow. Alternatively, an external Shared VPC can be configured using the `network_config`variable.
|
1. Isolated VPC network and a subnet to be used by Vertex and Dataflow. Alternatively, an external Shared VPC can be configured using the `network_config`variable.
|
||||||
- Firewall rule to allow the internal subnet communication required by Dataflow
|
1. Firewall rule to allow the internal subnet communication required by Dataflow.
|
||||||
- Cloud NAT required to reach the internet from the different computing resources (Vertex and Dataflow)
|
1. Cloud NAT required to reach the internet from the different computing resources (Vertex and Dataflow).
|
||||||
- GCS buckets to host Vertex AI and Cloud Build Artifacts. By default the buckets will be regional and should match the Vertex AI region for the different resources (i.e. Vertex Managed Dataset) and processes (i.e. Vertex trainining)
|
1. GCS buckets to host Vertex AI and Cloud Build Artifacts. By default the buckets will be regional and should match the Vertex AI region for the different resources (i.e. Vertex Managed Dataset) and processes (i.e. Vertex trainining).
|
||||||
- BigQuery Dataset where the training data will be stored. This is optional, since the training data could be already hosted in an existing BigQuery dataset.
|
1. BigQuery Dataset where the training data will be stored. This is optional, since the training data could be already hosted in an existing BigQuery dataset.
|
||||||
- Artifact Registry Docker repository to host the custom images.
|
1. Artifact Registry Docker repository to host the custom images.
|
||||||
- Service account (`mlops-[env]@`) with the minimum permissions required by Vertex AI and Dataflow (if this service is used inside of the Vertex AI Pipeline).
|
1. Service account (`PREFIX-sa-mlops`) with the minimum permissions required by Vertex AI and Dataflow (if this service is used inside of the Vertex AI Pipeline).
|
||||||
- Service account (`github@`) to be used by Workload Identity Federation, to federate Github identity (Optional).
|
1. Service account (`PREFIX-sa-github@`) to be used by Workload Identity Federation, to federate Github identity (Optional).
|
||||||
- Secret to store the Github SSH key to get access the CICD code repo.
|
1. Secret Manager to store the Github SSH key to get access the CICD code repo.
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
![MLOps project description](./images/mlops_projects.png "MLOps project description")
|
![MLOps project description](./images/mlops_projects.png "MLOps project description")
|
||||||
|
|
||||||
|
@ -30,7 +36,7 @@ Assign roles relying on User groups is a way to decouple the final set of permis
|
||||||
|
|
||||||
We use the following groups to control access to resources:
|
We use the following groups to control access to resources:
|
||||||
|
|
||||||
- *Data Scientits* (gcp-ml-ds@<company.org>). They manage notebooks and create ML pipelines.
|
- *Data Scientist* (gcp-ml-ds@<company.org>). They manage notebooks and create ML pipelines.
|
||||||
- *ML Engineers* (gcp-ml-eng@<company.org>). They manage the different Vertex resources.
|
- *ML Engineers* (gcp-ml-eng@<company.org>). They manage the different Vertex resources.
|
||||||
- *ML Viewer* (gcp-ml-eng@<company.org>). Group with wiewer permission for the different resources.
|
- *ML Viewer* (gcp-ml-eng@<company.org>). Group with wiewer permission for the different resources.
|
||||||
|
|
||||||
|
@ -46,69 +52,80 @@ Please note that these groups are not suitable for production grade environments
|
||||||
## What's next?
|
## What's next?
|
||||||
|
|
||||||
This blueprint can be used as a building block for setting up an end2end ML Ops solution. As next step, you can follow this [guide](https://cloud.google.com/architecture/architecture-for-mlops-using-tfx-kubeflow-pipelines-and-cloud-build) to setup a Vertex AI pipeline and run it on the deployed infraestructure.
|
This blueprint can be used as a building block for setting up an end2end ML Ops solution. As next step, you can follow this [guide](https://cloud.google.com/architecture/architecture-for-mlops-using-tfx-kubeflow-pipelines-and-cloud-build) to setup a Vertex AI pipeline and run it on the deployed infraestructure.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Basic usage of this module is as follows:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
module "test" {
|
||||||
|
source = "./fabric/blueprints/data-solutions/vertex-mlops/"
|
||||||
|
notebooks = {
|
||||||
|
"myworkbench" = {
|
||||||
|
type = "USER_MANAGED"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
prefix = "pref-dev"
|
||||||
|
project_config = {
|
||||||
|
billing_account_id = "000000-123456-123456"
|
||||||
|
parent = "folders/111111111111"
|
||||||
|
project_id = "test-dev"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
# tftest modules=11 resources=60
|
||||||
|
```
|
||||||
<!-- BEGIN TFDOC -->
|
<!-- BEGIN TFDOC -->
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
|
||||||
| name | description | type | required | default |
|
| name | description | type | required | default |
|
||||||
|---|---|:---:|:---:|:---:|
|
|---|---|:---:|:---:|:---:|
|
||||||
| [project_id](variables.tf#L101) | Project id, references existing project if `project_create` is null. | <code>string</code> | ✓ | |
|
| [notebooks](variables.tf#L69) | Vertex AI workbenches to be deployed. Service Account runtime/instances deployed. | <code title="map(object({ type = string machine_type = optional(string, "n1-standard-4") internal_ip_only = optional(bool, true) idle_shutdown = optional(bool, false) owner = optional(string) }))">map(object({…}))</code> | ✓ | |
|
||||||
|
| [project_config](variables.tf#L96) | Provide 'billing_account_id' value if project creation is needed, uses existing 'project_id' if null. Parent is in 'folders/nnn' or 'organizations/nnn' format. | <code title="object({ billing_account_id = optional(string) parent = optional(string) project_id = string })">object({…})</code> | ✓ | |
|
||||||
| [bucket_name](variables.tf#L18) | GCS bucket name to store the Vertex AI artifacts. | <code>string</code> | | <code>null</code> |
|
| [bucket_name](variables.tf#L18) | GCS bucket name to store the Vertex AI artifacts. | <code>string</code> | | <code>null</code> |
|
||||||
| [dataset_name](variables.tf#L24) | BigQuery Dataset to store the training data. | <code>string</code> | | <code>null</code> |
|
| [dataset_name](variables.tf#L24) | BigQuery Dataset to store the training data. | <code>string</code> | | <code>null</code> |
|
||||||
| [groups](variables.tf#L30) | Name of the groups (name@domain.org) to apply opinionated IAM permissions. | <code title="object({ gcp-ml-ds = string gcp-ml-eng = string gcp-ml-viewer = string })">object({…})</code> | | <code title="{ gcp-ml-ds = null gcp-ml-eng = null gcp-ml-viewer = null }">{…}</code> |
|
| [groups](variables.tf#L30) | Name of the groups (name@domain.org) to apply opinionated IAM permissions. | <code title="object({ gcp-ml-ds = optional(string) gcp-ml-eng = optional(string) gcp-ml-viewer = optional(string) })">object({…})</code> | | <code>{}</code> |
|
||||||
| [identity_pool_claims](variables.tf#L45) | Claims to be used by Workload Identity Federation (i.e.: attribute.repository/ORGANIZATION/REPO). If a not null value is provided, then google_iam_workload_identity_pool resource will be created. | <code>string</code> | | <code>null</code> |
|
| [identity_pool_claims](variables.tf#L41) | Claims to be used by Workload Identity Federation (i.e.: attribute.repository/ORGANIZATION/REPO). If a not null value is provided, then google_iam_workload_identity_pool resource will be created. | <code>string</code> | | <code>null</code> |
|
||||||
| [labels](variables.tf#L51) | Labels to be assigned at project level. | <code>map(string)</code> | | <code>{}</code> |
|
| [labels](variables.tf#L47) | Labels to be assigned at project level. | <code>map(string)</code> | | <code>{}</code> |
|
||||||
| [location](variables.tf#L57) | Location used for multi-regional resources. | <code>string</code> | | <code>"eu"</code> |
|
| [location](variables.tf#L53) | Location used for multi-regional resources. | <code>string</code> | | <code>"eu"</code> |
|
||||||
| [network_config](variables.tf#L63) | Shared VPC network configurations to use. If null networks will be created in projects with preconfigured values. | <code title="object({ host_project = string network_self_link = string subnet_self_link = string })">object({…})</code> | | <code>null</code> |
|
| [network_config](variables.tf#L59) | Shared VPC network configurations to use. If null networks will be created in projects with preconfigured values. | <code title="object({ host_project = string network_self_link = string subnet_self_link = string })">object({…})</code> | | <code>null</code> |
|
||||||
| [notebooks](variables.tf#L73) | Vertex AI workbenchs to be deployed. | <code title="map(object({ owner = string region = string subnet = string internal_ip_only = optional(bool, false) idle_shutdown = optional(bool) }))">map(object({…}))</code> | | <code>{}</code> |
|
| [prefix](variables.tf#L90) | Prefix used for the project id. | <code>string</code> | | <code>null</code> |
|
||||||
| [prefix](variables.tf#L86) | Prefix used for the project id. | <code>string</code> | | <code>null</code> |
|
| [region](variables.tf#L110) | Region used for regional resources. | <code>string</code> | | <code>"europe-west4"</code> |
|
||||||
| [project_create](variables.tf#L92) | Provide values if project creation is needed, uses existing project if null. Parent is in 'folders/nnn' or 'organizations/nnn' format. | <code title="object({ billing_account_id = string parent = string })">object({…})</code> | | <code>null</code> |
|
| [repo_name](variables.tf#L116) | Cloud Source Repository name. null to avoid to create it. | <code>string</code> | | <code>null</code> |
|
||||||
| [project_services](variables.tf#L106) | List of core services enabled on all projects. | <code>list(string)</code> | | <code title="[ "aiplatform.googleapis.com", "artifactregistry.googleapis.com", "bigquery.googleapis.com", "cloudbuild.googleapis.com", "compute.googleapis.com", "datacatalog.googleapis.com", "dataflow.googleapis.com", "iam.googleapis.com", "monitoring.googleapis.com", "notebooks.googleapis.com", "secretmanager.googleapis.com", "servicenetworking.googleapis.com", "serviceusage.googleapis.com" ]">[…]</code> |
|
| [service_encryption_keys](variables.tf#L122) | Cloud KMS to use to encrypt different services. Key location should match service region. | <code title="object({ aiplatform = optional(string) bq = optional(string) notebooks = optional(string) secretmanager = optional(string) storage = optional(string) })">object({…})</code> | | <code>{}</code> |
|
||||||
| [region](variables.tf#L126) | Region used for regional resources. | <code>string</code> | | <code>"europe-west4"</code> |
|
|
||||||
| [repo_name](variables.tf#L132) | Cloud Source Repository name. null to avoid to create it. | <code>string</code> | | <code>null</code> |
|
|
||||||
| [sa_mlops_name](variables.tf#L138) | Name for the MLOPs Service Account. | <code>string</code> | | <code>"sa-mlops"</code> |
|
|
||||||
|
|
||||||
## Outputs
|
## Outputs
|
||||||
|
|
||||||
| name | description | sensitive |
|
| name | description | sensitive |
|
||||||
|---|---|:---:|
|
|---|---|:---:|
|
||||||
| [github](outputs.tf#L33) | Github Configuration. | |
|
| [github](outputs.tf#L30) | Github Configuration. | |
|
||||||
| [notebook](outputs.tf#L39) | Vertex AI managed notebook details. | |
|
| [notebook](outputs.tf#L35) | Vertex AI notebooks ids. | |
|
||||||
| [project](outputs.tf#L44) | The project resource as return by the `project` module. | |
|
| [project_id](outputs.tf#L43) | Project ID. | |
|
||||||
| [project_id](outputs.tf#L49) | Project ID. | |
|
|
||||||
|
|
||||||
<!-- END TFDOC -->
|
<!-- END TFDOC -->
|
||||||
|
|
||||||
## TODO
|
|
||||||
|
|
||||||
- Add support for User Managed Notebooks, SA permission option and non default SA for Single User mode.
|
|
||||||
- Improve default naming for local VPC and Cloud NAT
|
|
||||||
|
|
||||||
## Test
|
## Test
|
||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
module "test" {
|
module "test" {
|
||||||
source = "./fabric/blueprints/data-solutions/vertex-mlops/"
|
source = "./fabric/blueprints/data-solutions/vertex-mlops/"
|
||||||
labels = {
|
labels = {
|
||||||
"env" : "dev",
|
"env" = "dev",
|
||||||
"team" : "ml"
|
"team" = "ml"
|
||||||
}
|
}
|
||||||
bucket_name = "test-dev"
|
bucket_name = "gcs-test"
|
||||||
dataset_name = "test"
|
dataset_name = "bq-test"
|
||||||
identity_pool_claims = "attribute.repository/ORGANIZATION/REPO"
|
identity_pool_claims = "attribute.repository/ORGANIZATION/REPO"
|
||||||
notebooks = {
|
notebooks = {
|
||||||
"myworkbench" : {
|
"myworkbench" = {
|
||||||
"owner" : "user@example.com",
|
type = "USER_MANAGED"
|
||||||
"region" : "europe-west4",
|
|
||||||
"subnet" : "default",
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
prefix = "pref"
|
prefix = "pref-dev"
|
||||||
project_id = "test-dev"
|
project_config = {
|
||||||
project_create = {
|
|
||||||
billing_account_id = "000000-123456-123456"
|
billing_account_id = "000000-123456-123456"
|
||||||
parent = "folders/111111111111"
|
parent = "folders/111111111111"
|
||||||
|
project_id = "test-dev"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
# tftest modules=12 resources=57
|
# tftest modules=13 resources=65
|
||||||
```
|
```
|
||||||
|
|
|
@ -0,0 +1,24 @@
|
||||||
|
/**
|
||||||
|
* Copyright 2023 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
terraform {
|
||||||
|
provider_meta "google" {
|
||||||
|
module_name = "blueprints/terraform/fabric-blueprints:vertex-mlops/v21.0.0"
|
||||||
|
}
|
||||||
|
provider_meta "google-beta" {
|
||||||
|
module_name = "blueprints/terraform/fabric-blueprints:vertex-mlops/v21.0.0"
|
||||||
|
}
|
||||||
|
}
|
|
@ -44,14 +44,11 @@ module "artifact_registry" {
|
||||||
project_id = module.project.project_id
|
project_id = module.project.project_id
|
||||||
location = var.region
|
location = var.region
|
||||||
format = "DOCKER"
|
format = "DOCKER"
|
||||||
# iam = {
|
|
||||||
# "roles/artifactregistry.admin" = ["group:cicd@example.com"]
|
|
||||||
# }
|
|
||||||
}
|
}
|
||||||
|
|
||||||
module "service-account-github" {
|
module "service-account-github" {
|
||||||
source = "../../../modules/iam-service-account"
|
source = "../../../modules/iam-service-account"
|
||||||
name = "sa-github"
|
name = "${var.prefix}-sa-github"
|
||||||
project_id = module.project.project_id
|
project_id = module.project.project_id
|
||||||
iam = var.identity_pool_claims == null ? {} : { "roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.github_pool[0].name}/${var.identity_pool_claims}"] }
|
iam = var.identity_pool_claims == null ? {} : { "roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.github_pool[0].name}/${var.identity_pool_claims}"] }
|
||||||
}
|
}
|
||||||
|
@ -63,6 +60,9 @@ module "secret-manager" {
|
||||||
secrets = {
|
secrets = {
|
||||||
github-key = [var.region]
|
github-key = [var.region]
|
||||||
}
|
}
|
||||||
|
encryption_key = {
|
||||||
|
"${var.region}" = var.service_encryption_keys.secretmanager
|
||||||
|
}
|
||||||
iam = {
|
iam = {
|
||||||
github-key = {
|
github-key = {
|
||||||
"roles/secretmanager.secretAccessor" = [
|
"roles/secretmanager.secretAccessor" = [
|
||||||
|
|
|
@ -64,7 +64,6 @@ locals {
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
service_encryption_keys = var.service_encryption_keys
|
|
||||||
shared_vpc_project = try(var.network_config.host_project, null)
|
shared_vpc_project = try(var.network_config.host_project, null)
|
||||||
|
|
||||||
subnet = (
|
subnet = (
|
||||||
|
@ -109,7 +108,7 @@ module "gcs-bucket" {
|
||||||
location = var.region
|
location = var.region
|
||||||
storage_class = "REGIONAL"
|
storage_class = "REGIONAL"
|
||||||
versioning = false
|
versioning = false
|
||||||
encryption_key = try(local.service_encryption_keys.storage, null)
|
encryption_key = var.service_encryption_keys.storage
|
||||||
}
|
}
|
||||||
|
|
||||||
# Default bucket for Cloud Build to prevent error: "'us' violates constraint ‘gcp.resourceLocations’"
|
# Default bucket for Cloud Build to prevent error: "'us' violates constraint ‘gcp.resourceLocations’"
|
||||||
|
@ -117,12 +116,12 @@ module "gcs-bucket" {
|
||||||
module "gcs-bucket-cloudbuild" {
|
module "gcs-bucket-cloudbuild" {
|
||||||
source = "../../../modules/gcs"
|
source = "../../../modules/gcs"
|
||||||
project_id = module.project.project_id
|
project_id = module.project.project_id
|
||||||
name = "${var.project_id}_cloudbuild"
|
name = "${var.prefix}_cloudbuild"
|
||||||
prefix = var.prefix
|
prefix = var.prefix
|
||||||
location = var.region
|
location = var.region
|
||||||
storage_class = "REGIONAL"
|
storage_class = "REGIONAL"
|
||||||
versioning = false
|
versioning = false
|
||||||
encryption_key = try(local.service_encryption_keys.storage, null)
|
encryption_key = var.service_encryption_keys.storage
|
||||||
}
|
}
|
||||||
|
|
||||||
module "bq-dataset" {
|
module "bq-dataset" {
|
||||||
|
@ -131,7 +130,7 @@ module "bq-dataset" {
|
||||||
project_id = module.project.project_id
|
project_id = module.project.project_id
|
||||||
id = var.dataset_name
|
id = var.dataset_name
|
||||||
location = var.region
|
location = var.region
|
||||||
encryption_key = try(local.service_encryption_keys.bq, null)
|
encryption_key = var.service_encryption_keys.bq
|
||||||
}
|
}
|
||||||
|
|
||||||
module "vpc-local" {
|
module "vpc-local" {
|
||||||
|
@ -190,19 +189,28 @@ module "cloudnat" {
|
||||||
|
|
||||||
module "project" {
|
module "project" {
|
||||||
source = "../../../modules/project"
|
source = "../../../modules/project"
|
||||||
name = var.project_id
|
name = var.project_config.project_id
|
||||||
parent = try(var.project_create.parent, null)
|
parent = var.project_config.parent
|
||||||
billing_account = try(var.project_create.billing_account_id, null)
|
billing_account = var.project_config.billing_account_id
|
||||||
project_create = var.project_create != null
|
project_create = var.project_config.billing_account_id != null
|
||||||
prefix = var.prefix
|
prefix = var.prefix
|
||||||
group_iam = local.group_iam
|
group_iam = local.group_iam
|
||||||
iam = {
|
iam = {
|
||||||
"roles/aiplatform.user" = [module.service-account-mlops.iam_email]
|
"roles/aiplatform.user" = [
|
||||||
|
module.service-account-mlops.iam_email,
|
||||||
|
module.service-account-notebook.iam_email
|
||||||
|
]
|
||||||
"roles/artifactregistry.reader" = [module.service-account-mlops.iam_email]
|
"roles/artifactregistry.reader" = [module.service-account-mlops.iam_email]
|
||||||
"roles/artifactregistry.writer" = [module.service-account-github.iam_email]
|
"roles/artifactregistry.writer" = [module.service-account-github.iam_email]
|
||||||
"roles/bigquery.dataEditor" = [module.service-account-mlops.iam_email]
|
"roles/bigquery.dataEditor" = [
|
||||||
"roles/bigquery.jobUser" = [module.service-account-mlops.iam_email]
|
module.service-account-mlops.iam_email,
|
||||||
"roles/bigquery.user" = [module.service-account-mlops.iam_email]
|
module.service-account-notebook.iam_email
|
||||||
|
]
|
||||||
|
"roles/bigquery.jobUser" = [
|
||||||
|
module.service-account-mlops.iam_email,
|
||||||
|
module.service-account-notebook.iam_email
|
||||||
|
]
|
||||||
|
"roles/bigquery.user" = [module.service-account-mlops.iam_email, module.service-account-notebook.iam_email]
|
||||||
"roles/cloudbuild.builds.editor" = [
|
"roles/cloudbuild.builds.editor" = [
|
||||||
module.service-account-mlops.iam_email,
|
module.service-account-mlops.iam_email,
|
||||||
module.service-account-github.iam_email
|
module.service-account-github.iam_email
|
||||||
|
@ -213,6 +221,8 @@ module "project" {
|
||||||
"roles/dataflow.worker" = [module.service-account-mlops.iam_email]
|
"roles/dataflow.worker" = [module.service-account-mlops.iam_email]
|
||||||
"roles/iam.serviceAccountUser" = [
|
"roles/iam.serviceAccountUser" = [
|
||||||
module.service-account-mlops.iam_email,
|
module.service-account-mlops.iam_email,
|
||||||
|
module.service-account-notebook.iam_email,
|
||||||
|
module.service-account-github.iam_email,
|
||||||
"serviceAccount:${module.project.service_accounts.robots.cloudbuild}"
|
"serviceAccount:${module.project.service_accounts.robots.cloudbuild}"
|
||||||
]
|
]
|
||||||
"roles/monitoring.metricWriter" = [module.service-account-mlops.iam_email]
|
"roles/monitoring.metricWriter" = [module.service-account-mlops.iam_email]
|
||||||
|
@ -223,28 +233,42 @@ module "project" {
|
||||||
]
|
]
|
||||||
"roles/storage.admin" = [
|
"roles/storage.admin" = [
|
||||||
module.service-account-mlops.iam_email,
|
module.service-account-mlops.iam_email,
|
||||||
module.service-account-github.iam_email
|
module.service-account-github.iam_email,
|
||||||
|
module.service-account-notebook.iam_email
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
labels = var.labels
|
labels = var.labels
|
||||||
|
|
||||||
org_policies = {
|
|
||||||
# Example of applying a project wide policy
|
|
||||||
# "compute.requireOsLogin" = {
|
|
||||||
# rules = [{ enforce = false }]
|
|
||||||
# }
|
|
||||||
}
|
|
||||||
|
|
||||||
service_encryption_key_ids = {
|
service_encryption_key_ids = {
|
||||||
bq = [try(local.service_encryption_keys.bq, null)]
|
aiplatform = [var.service_encryption_keys.aiplatform]
|
||||||
compute = [try(local.service_encryption_keys.compute, null)]
|
bq = [var.service_encryption_keys.bq]
|
||||||
cloudbuild = [try(local.service_encryption_keys.storage, null)]
|
compute = [var.service_encryption_keys.notebooks]
|
||||||
notebooks = [try(local.service_encryption_keys.compute, null)]
|
cloudbuild = [var.service_encryption_keys.storage]
|
||||||
storage = [try(local.service_encryption_keys.storage, null)]
|
notebooks = [var.service_encryption_keys.notebooks]
|
||||||
|
secretmanager = [var.service_encryption_keys.secretmanager]
|
||||||
|
storage = [var.service_encryption_keys.storage]
|
||||||
}
|
}
|
||||||
services = var.project_services
|
|
||||||
|
|
||||||
|
|
||||||
|
services = [
|
||||||
|
"aiplatform.googleapis.com",
|
||||||
|
"artifactregistry.googleapis.com",
|
||||||
|
"bigquery.googleapis.com",
|
||||||
|
"bigquerystorage.googleapis.com",
|
||||||
|
"cloudbuild.googleapis.com",
|
||||||
|
"compute.googleapis.com",
|
||||||
|
"datacatalog.googleapis.com",
|
||||||
|
"dataflow.googleapis.com",
|
||||||
|
"iam.googleapis.com",
|
||||||
|
"ml.googleapis.com",
|
||||||
|
"monitoring.googleapis.com",
|
||||||
|
"notebooks.googleapis.com",
|
||||||
|
"secretmanager.googleapis.com",
|
||||||
|
"servicenetworking.googleapis.com",
|
||||||
|
"serviceusage.googleapis.com",
|
||||||
|
"stackdriver.googleapis.com",
|
||||||
|
"storage.googleapis.com",
|
||||||
|
"storage-component.googleapis.com"
|
||||||
|
]
|
||||||
shared_vpc_service_config = local.shared_vpc_project == null ? null : {
|
shared_vpc_service_config = local.shared_vpc_project == null ? null : {
|
||||||
attach = true
|
attach = true
|
||||||
host_project = local.shared_vpc_project
|
host_project = local.shared_vpc_project
|
||||||
|
@ -254,11 +278,8 @@ module "project" {
|
||||||
|
|
||||||
module "service-account-mlops" {
|
module "service-account-mlops" {
|
||||||
source = "../../../modules/iam-service-account"
|
source = "../../../modules/iam-service-account"
|
||||||
name = var.sa_mlops_name
|
name = "${var.prefix}-sa-mlops"
|
||||||
project_id = module.project.project_id
|
project_id = module.project.project_id
|
||||||
iam = {
|
|
||||||
"roles/iam.serviceAccountUser" = [module.service-account-github.iam_email]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "google_project_iam_member" "shared_vpc" {
|
resource "google_project_iam_member" "shared_vpc" {
|
||||||
|
@ -268,11 +289,8 @@ resource "google_project_iam_member" "shared_vpc" {
|
||||||
member = "serviceAccount:${module.project.service_accounts.robots.notebooks}"
|
member = "serviceAccount:${module.project.service_accounts.robots.notebooks}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
resource "google_sourcerepo_repository" "code-repo" {
|
resource "google_sourcerepo_repository" "code-repo" {
|
||||||
count = var.repo_name == null ? 0 : 1
|
count = var.repo_name == null ? 0 : 1
|
||||||
name = var.repo_name
|
name = var.repo_name
|
||||||
project = module.project.project_id
|
project = module.project.project_id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,174 @@
|
||||||
|
# Copyright 2023 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
apiVersion: blueprints.cloud.google.com/v1alpha1
|
||||||
|
kind: BlueprintMetadata
|
||||||
|
metadata:
|
||||||
|
name: vertex-mlops
|
||||||
|
spec:
|
||||||
|
info:
|
||||||
|
title: MLOps with Vertex AI
|
||||||
|
source:
|
||||||
|
repo: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric.git
|
||||||
|
dir: blueprints/data-solutions/vertex-mlops
|
||||||
|
sourceType: git
|
||||||
|
version: 21.0.0
|
||||||
|
actuationTool:
|
||||||
|
type: Terraform
|
||||||
|
version: '>= 1.3.0'
|
||||||
|
description:
|
||||||
|
tagline: Create a Vertex AI environment needed for MLOps.
|
||||||
|
detailed: |-
|
||||||
|
This example implements the infrastructure required to deploy an end-to-end MLOps process using Vertex AI platform.
|
||||||
|
architecture:
|
||||||
|
- Vertex Workbench (for the experimentation environment).
|
||||||
|
- GCP Project (optional) to host all the resources.
|
||||||
|
- Isolated VPC network and a subnet to be used by Vertex and Dataflow. Alternatively, an external Shared VPC can be configured using the `network_config`variable.
|
||||||
|
- Firewall rule to allow the internal subnet communication required by Dataflow.
|
||||||
|
- Cloud NAT required to reach the internet from the different computing resources (Vertex and Dataflow).
|
||||||
|
- GCS buckets to host Vertex AI and Cloud Build Artifacts. By default the buckets will be regional and should match the Vertex AI region for the different resources (i.e. Vertex Managed Dataset) and processes (i.e. Vertex trainining).
|
||||||
|
- BigQuery Dataset where the training data will be stored. This is optional, since the training data could be already hosted in an existing BigQuery dataset.
|
||||||
|
- Artifact Registry Docker repository to host the custom images.
|
||||||
|
- Service account (`PREFIX-sa-mlops`) with the minimum permissions required by Vertex AI and Dataflow (if this service is used inside of the Vertex AI Pipeline).
|
||||||
|
- Service account (`PREFIX-sa-github@`) to be used by Workload Identity Federation, to federate Github identity (Optional).
|
||||||
|
- Secret Manager to store the Github SSH key to get access the CICD code repo.
|
||||||
|
content:
|
||||||
|
documentation:
|
||||||
|
- title: Architecture Diagram
|
||||||
|
url: https://github.com/GoogleCloudPlatform/cloud-foundation-fabric/blob/master/blueprints/data-solutions/vertex-mlops/images/mlops_projects.png
|
||||||
|
interfaces:
|
||||||
|
variables:
|
||||||
|
- name: notebooks
|
||||||
|
description: Vertex AI workbenches to be deployed. Service Account runtime/instances deployed.
|
||||||
|
type: |-
|
||||||
|
map(object({
|
||||||
|
type = string
|
||||||
|
machine_type = optional(string, "n1-standard-4")
|
||||||
|
internal_ip_only = optional(bool, true)
|
||||||
|
idle_shutdown = optional(bool, false)
|
||||||
|
owner = optional(string)
|
||||||
|
}))
|
||||||
|
required: true
|
||||||
|
- name: project_config
|
||||||
|
description: Provide 'billing_account_id' value if project creation is needed, uses existing 'project_id' if null. Parent is in 'folders/nnn' or 'organizations/nnn' format.
|
||||||
|
type: |-
|
||||||
|
object({
|
||||||
|
billing_account_id = optional(string)
|
||||||
|
parent = optional(string)
|
||||||
|
project_id = string
|
||||||
|
})
|
||||||
|
required: true
|
||||||
|
- name: bucket_name
|
||||||
|
description: GCS bucket name to store the Vertex AI artifacts.
|
||||||
|
type: string
|
||||||
|
default: null
|
||||||
|
required: false
|
||||||
|
- name: dataset_name
|
||||||
|
description: BigQuery Dataset to store the training data.
|
||||||
|
type: string
|
||||||
|
default: null
|
||||||
|
required: false
|
||||||
|
- name: groups
|
||||||
|
description: Name of the groups (group_name@domain.org) to apply opinionated IAM permissions.
|
||||||
|
type: |-
|
||||||
|
object({
|
||||||
|
gcp-ml-ds = optional(string),
|
||||||
|
gcp-ml-eng = optional(string),
|
||||||
|
gcp-ml-viewer = optional(string)
|
||||||
|
})
|
||||||
|
default: {}
|
||||||
|
required: false
|
||||||
|
- name: identity_pool_claims
|
||||||
|
description: "Claims to be used by Workload Identity Federation (i.e.: attribute.repository/ORGANIZATION/REPO). If a not null value is provided, then google_iam_workload_identity_pool resource will be created."
|
||||||
|
type: string
|
||||||
|
default: null
|
||||||
|
required: false
|
||||||
|
- name: labels
|
||||||
|
description: Labels to be assigned at project level.
|
||||||
|
type: map(string)
|
||||||
|
required: false
|
||||||
|
default: {}
|
||||||
|
- name: location
|
||||||
|
description: Location used for multi-regional resources.
|
||||||
|
type: string
|
||||||
|
default: eu
|
||||||
|
required: false
|
||||||
|
- name: network_config
|
||||||
|
description: Shared VPC network configurations to use. If null networks will be created in projects with preconfigured values.
|
||||||
|
type: |-
|
||||||
|
object({
|
||||||
|
host_project = string
|
||||||
|
network_self_link = string
|
||||||
|
subnet_self_link = string
|
||||||
|
})
|
||||||
|
default: null
|
||||||
|
required: false
|
||||||
|
- name: prefix
|
||||||
|
description: Prefix used for the project id.
|
||||||
|
type: string
|
||||||
|
default: null
|
||||||
|
required: false
|
||||||
|
- name: region
|
||||||
|
description: Region used for regional resources.
|
||||||
|
type: string
|
||||||
|
default: europe-west4
|
||||||
|
required: false
|
||||||
|
- name: repo_name
|
||||||
|
description: Cloud Source Repository name. null to avoid to create it.
|
||||||
|
type: string
|
||||||
|
default: null
|
||||||
|
required: false
|
||||||
|
- name: service_encryption_keys
|
||||||
|
description: Cloud KMS to use to encrypt different services. Key location should match service region.
|
||||||
|
type: |-
|
||||||
|
object({
|
||||||
|
aiplatform = optional(string)
|
||||||
|
bq = optional(string)
|
||||||
|
notebooks = optional(string)
|
||||||
|
secretmanager = optional(string)
|
||||||
|
storage = optional(string)
|
||||||
|
})
|
||||||
|
default: {}
|
||||||
|
required: false
|
||||||
|
outputs:
|
||||||
|
- name: github
|
||||||
|
description: Github Configuration.
|
||||||
|
- name: notebook
|
||||||
|
description: Vertex AI notebooks ids.
|
||||||
|
- name: project
|
||||||
|
description: The project resource as return by the project module.
|
||||||
|
requirements:
|
||||||
|
roles:
|
||||||
|
- level: Project
|
||||||
|
roles:
|
||||||
|
- roles/owner
|
||||||
|
services:
|
||||||
|
- aiplatform.googleapis.com
|
||||||
|
- artifactregistry.googleapis.com
|
||||||
|
- bigquery.googleapis.com
|
||||||
|
- bigquerystorage.googleapis.com
|
||||||
|
- cloudbuild.googleapis.com
|
||||||
|
- compute.googleapis.com
|
||||||
|
- datacatalog.googleapis.com
|
||||||
|
- dataflow.googleapis.com
|
||||||
|
- iam.googleapis.com
|
||||||
|
- ml.googleapis.com
|
||||||
|
- monitoring.googleapis.com
|
||||||
|
- notebooks.googleapis.com
|
||||||
|
- secretmanager.googleapis.com
|
||||||
|
- servicenetworking.googleapis.com
|
||||||
|
- serviceusage.googleapis.com
|
||||||
|
- stackdriver.googleapis.com
|
||||||
|
- storage.googleapis.com
|
||||||
|
- storage-component.googleapis.com
|
|
@ -1,60 +0,0 @@
|
||||||
/**
|
|
||||||
* Copyright 2022 Google LLC
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
resource "google_notebooks_runtime" "runtime" {
|
|
||||||
for_each = var.notebooks
|
|
||||||
name = each.key
|
|
||||||
|
|
||||||
project = module.project.project_id
|
|
||||||
location = var.notebooks[each.key].region
|
|
||||||
access_config {
|
|
||||||
access_type = "SINGLE_USER"
|
|
||||||
runtime_owner = var.notebooks[each.key].owner
|
|
||||||
}
|
|
||||||
software_config {
|
|
||||||
enable_health_monitoring = true
|
|
||||||
idle_shutdown = var.notebooks[each.key].idle_shutdown
|
|
||||||
idle_shutdown_timeout = 1800
|
|
||||||
}
|
|
||||||
virtual_machine {
|
|
||||||
virtual_machine_config {
|
|
||||||
machine_type = "n1-standard-4"
|
|
||||||
network = local.vpc
|
|
||||||
subnet = local.subnet
|
|
||||||
internal_ip_only = var.notebooks[each.key].internal_ip_only
|
|
||||||
dynamic "encryption_config" {
|
|
||||||
for_each = try(local.service_encryption_keys.compute, null) == null ? [] : [1]
|
|
||||||
content {
|
|
||||||
kms_key = local.service_encryption_keys.compute
|
|
||||||
}
|
|
||||||
}
|
|
||||||
metadata = {
|
|
||||||
notebook-disable-nbconvert = "false"
|
|
||||||
notebook-disable-downloads = "false"
|
|
||||||
notebook-disable-terminal = "false"
|
|
||||||
#notebook-disable-root = "true"
|
|
||||||
#notebook-upgrade-schedule = "48 4 * * MON"
|
|
||||||
}
|
|
||||||
data_disk {
|
|
||||||
initialize_params {
|
|
||||||
disk_size_gb = "100"
|
|
||||||
disk_type = "PD_STANDARD"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -14,9 +14,6 @@
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
# TODO(): proper outputs
|
|
||||||
|
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
docker_split = try(split("/", module.artifact_registry.id), null)
|
docker_split = try(split("/", module.artifact_registry.id), null)
|
||||||
docker_repo = try("${local.docker_split[3]}-docker.pkg.dev/${local.docker_split[1]}/${local.docker_split[5]}", null)
|
docker_repo = try("${local.docker_split[3]}-docker.pkg.dev/${local.docker_split[1]}/${local.docker_split[5]}", null)
|
||||||
|
@ -31,19 +28,16 @@ locals {
|
||||||
}
|
}
|
||||||
|
|
||||||
output "github" {
|
output "github" {
|
||||||
|
|
||||||
description = "Github Configuration."
|
description = "Github Configuration."
|
||||||
value = local.gh_config
|
value = local.gh_config
|
||||||
}
|
}
|
||||||
|
|
||||||
output "notebook" {
|
output "notebook" {
|
||||||
description = "Vertex AI managed notebook details."
|
description = "Vertex AI notebooks ids."
|
||||||
value = { for k, v in resource.google_notebooks_runtime.runtime : k => v.id }
|
value = merge(
|
||||||
}
|
{ for k, v in resource.google_notebooks_runtime.runtime : k => v.id },
|
||||||
|
{ for k, v in resource.google_notebooks_instance.playground : k => v.id }
|
||||||
output "project" {
|
)
|
||||||
description = "The project resource as return by the `project` module."
|
|
||||||
value = module.project
|
|
||||||
}
|
}
|
||||||
|
|
||||||
output "project_id" {
|
output "project_id" {
|
||||||
|
|
|
@ -1,20 +0,0 @@
|
||||||
bucket_name = "creditcards-dev"
|
|
||||||
dataset_name = "creditcards"
|
|
||||||
identity_pool_claims = "attribute.repository/ORGANIZATION/REPO"
|
|
||||||
labels = {
|
|
||||||
"env" : "dev",
|
|
||||||
"team" : "ml"
|
|
||||||
}
|
|
||||||
notebooks = {
|
|
||||||
"myworkbench" : {
|
|
||||||
"owner" : "user@example.com",
|
|
||||||
"region" : "europe-west4",
|
|
||||||
"subnet" : "default",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
prefix = "pref"
|
|
||||||
project_id = "creditcards-dev"
|
|
||||||
project_create = {
|
|
||||||
billing_account_id = "000000-123456-123456"
|
|
||||||
parent = "folders/111111111111"
|
|
||||||
}
|
|
|
@ -30,15 +30,11 @@ variable "dataset_name" {
|
||||||
variable "groups" {
|
variable "groups" {
|
||||||
description = "Name of the groups (name@domain.org) to apply opinionated IAM permissions."
|
description = "Name of the groups (name@domain.org) to apply opinionated IAM permissions."
|
||||||
type = object({
|
type = object({
|
||||||
gcp-ml-ds = string
|
gcp-ml-ds = optional(string)
|
||||||
gcp-ml-eng = string
|
gcp-ml-eng = optional(string)
|
||||||
gcp-ml-viewer = string
|
gcp-ml-viewer = optional(string)
|
||||||
})
|
})
|
||||||
default = {
|
default = {}
|
||||||
gcp-ml-ds = null
|
|
||||||
gcp-ml-eng = null
|
|
||||||
gcp-ml-viewer = null
|
|
||||||
}
|
|
||||||
nullable = false
|
nullable = false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -71,16 +67,24 @@ variable "network_config" {
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "notebooks" {
|
variable "notebooks" {
|
||||||
description = "Vertex AI workbenchs to be deployed."
|
description = "Vertex AI workbenches to be deployed. Service Account runtime/instances deployed."
|
||||||
type = map(object({
|
type = map(object({
|
||||||
owner = string
|
type = string
|
||||||
region = string
|
machine_type = optional(string, "n1-standard-4")
|
||||||
subnet = string
|
internal_ip_only = optional(bool, true)
|
||||||
internal_ip_only = optional(bool, false)
|
idle_shutdown = optional(bool, false)
|
||||||
idle_shutdown = optional(bool)
|
owner = optional(string)
|
||||||
}))
|
}))
|
||||||
default = {}
|
validation {
|
||||||
nullable = false
|
condition = alltrue([
|
||||||
|
for k, v in var.notebooks : contains(["USER_MANAGED", "MANAGED"], v.type)])
|
||||||
|
error_message = "All `type` must be one of `USER_MANAGED` or `MANAGED`."
|
||||||
|
}
|
||||||
|
validation {
|
||||||
|
condition = alltrue([
|
||||||
|
for k, v in var.notebooks : (v.type == "MANAGED" && try(v.owner != null, false) || v.type == "USER_MANAGED")])
|
||||||
|
error_message = "`owner` must be set for `MANAGED` instances."
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "prefix" {
|
variable "prefix" {
|
||||||
|
@ -89,38 +93,18 @@ variable "prefix" {
|
||||||
default = null
|
default = null
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "project_create" {
|
variable "project_config" {
|
||||||
description = "Provide values if project creation is needed, uses existing project if null. Parent is in 'folders/nnn' or 'organizations/nnn' format."
|
description = "Provide 'billing_account_id' value if project creation is needed, uses existing 'project_id' if null. Parent is in 'folders/nnn' or 'organizations/nnn' format."
|
||||||
type = object({
|
type = object({
|
||||||
billing_account_id = string
|
billing_account_id = optional(string)
|
||||||
parent = string
|
parent = optional(string)
|
||||||
|
project_id = string
|
||||||
})
|
})
|
||||||
default = null
|
validation {
|
||||||
}
|
condition = var.project_config.project_id != null
|
||||||
|
error_message = "Project id must be set."
|
||||||
variable "project_id" {
|
}
|
||||||
description = "Project id, references existing project if `project_create` is null."
|
nullable = false
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "project_services" {
|
|
||||||
description = "List of core services enabled on all projects."
|
|
||||||
type = list(string)
|
|
||||||
default = [
|
|
||||||
"aiplatform.googleapis.com",
|
|
||||||
"artifactregistry.googleapis.com",
|
|
||||||
"bigquery.googleapis.com",
|
|
||||||
"cloudbuild.googleapis.com",
|
|
||||||
"compute.googleapis.com",
|
|
||||||
"datacatalog.googleapis.com",
|
|
||||||
"dataflow.googleapis.com",
|
|
||||||
"iam.googleapis.com",
|
|
||||||
"monitoring.googleapis.com",
|
|
||||||
"notebooks.googleapis.com",
|
|
||||||
"secretmanager.googleapis.com",
|
|
||||||
"servicenetworking.googleapis.com",
|
|
||||||
"serviceusage.googleapis.com"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "region" {
|
variable "region" {
|
||||||
|
@ -135,18 +119,15 @@ variable "repo_name" {
|
||||||
default = null
|
default = null
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "sa_mlops_name" {
|
variable "service_encryption_keys" {
|
||||||
description = "Name for the MLOPs Service Account."
|
|
||||||
type = string
|
|
||||||
default = "sa-mlops"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "service_encryption_keys" { # service encription key
|
|
||||||
description = "Cloud KMS to use to encrypt different services. Key location should match service region."
|
description = "Cloud KMS to use to encrypt different services. Key location should match service region."
|
||||||
type = object({
|
type = object({
|
||||||
bq = string
|
aiplatform = optional(string)
|
||||||
compute = string
|
bq = optional(string)
|
||||||
storage = string
|
notebooks = optional(string)
|
||||||
|
secretmanager = optional(string)
|
||||||
|
storage = optional(string)
|
||||||
})
|
})
|
||||||
default = null
|
default = {}
|
||||||
|
nullable = false
|
||||||
}
|
}
|
|
@ -0,0 +1,127 @@
|
||||||
|
/**
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
resource "google_vertex_ai_metadata_store" "store" {
|
||||||
|
provider = google-beta
|
||||||
|
project = module.project.project_id
|
||||||
|
name = "default"
|
||||||
|
description = "Vertex Ai Metadata Store"
|
||||||
|
region = var.region
|
||||||
|
dynamic "encryption_spec" {
|
||||||
|
for_each = var.service_encryption_keys.aiplatform == null ? [] : [""]
|
||||||
|
|
||||||
|
content {
|
||||||
|
kms_key_name = var.service_encryption_keys.aiplatform
|
||||||
|
}
|
||||||
|
}
|
||||||
|
# `state` value will be decided automatically based on the result of the configuration
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [state]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module "service-account-notebook" {
|
||||||
|
source = "../../../modules/iam-service-account"
|
||||||
|
project_id = module.project.project_id
|
||||||
|
name = "notebook-sa"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_notebooks_runtime" "runtime" {
|
||||||
|
for_each = { for k, v in var.notebooks : k => v if v.type == "MANAGED" }
|
||||||
|
name = "${var.prefix}-${each.key}"
|
||||||
|
project = module.project.project_id
|
||||||
|
location = var.region
|
||||||
|
access_config {
|
||||||
|
access_type = "SINGLE_USER"
|
||||||
|
runtime_owner = try(var.notebooks[each.key].owner, null)
|
||||||
|
}
|
||||||
|
software_config {
|
||||||
|
enable_health_monitoring = true
|
||||||
|
}
|
||||||
|
virtual_machine {
|
||||||
|
virtual_machine_config {
|
||||||
|
machine_type = var.notebooks[each.key].machine_type
|
||||||
|
network = local.vpc
|
||||||
|
subnet = local.subnet
|
||||||
|
internal_ip_only = var.notebooks[each.key].internal_ip_only
|
||||||
|
dynamic "encryption_config" {
|
||||||
|
for_each = var.service_encryption_keys.notebooks == null ? [] : [1]
|
||||||
|
content {
|
||||||
|
kms_key = var.service_encryption_keys.notebooks
|
||||||
|
}
|
||||||
|
}
|
||||||
|
metadata = {
|
||||||
|
notebook-disable-nbconvert = "false"
|
||||||
|
notebook-disable-downloads = "true"
|
||||||
|
notebook-disable-terminal = "false"
|
||||||
|
notebook-disable-root = "true"
|
||||||
|
}
|
||||||
|
data_disk {
|
||||||
|
initialize_params {
|
||||||
|
disk_size_gb = "100"
|
||||||
|
disk_type = "PD_STANDARD"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_notebooks_instance" "playground" {
|
||||||
|
for_each = { for k, v in var.notebooks : k => v if v.type == "USER_MANAGED" }
|
||||||
|
name = "${var.prefix}-${each.key}"
|
||||||
|
location = "${var.region}-b"
|
||||||
|
machine_type = var.notebooks[each.key].machine_type
|
||||||
|
project = module.project.project_id
|
||||||
|
|
||||||
|
container_image {
|
||||||
|
repository = "gcr.io/deeplearning-platform-release/base-cpu"
|
||||||
|
tag = "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
install_gpu_driver = true
|
||||||
|
boot_disk_type = "PD_SSD"
|
||||||
|
boot_disk_size_gb = 110
|
||||||
|
disk_encryption = var.service_encryption_keys.notebooks != null ? "CMEK" : null
|
||||||
|
kms_key = var.service_encryption_keys.notebooks
|
||||||
|
|
||||||
|
no_public_ip = var.notebooks[each.key].internal_ip_only
|
||||||
|
no_proxy_access = false
|
||||||
|
|
||||||
|
network = local.vpc
|
||||||
|
subnet = local.subnet
|
||||||
|
|
||||||
|
instance_owners = try(tolist(var.notebooks[each.key].owner), null)
|
||||||
|
service_account = module.service-account-notebook.email
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
notebook-disable-nbconvert = "false"
|
||||||
|
notebook-disable-downloads = "false"
|
||||||
|
notebook-disable-terminal = "false"
|
||||||
|
notebook-disable-root = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Remove once terraform-provider-google/issues/9164 is fixed
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [disk_encryption, kms_key]
|
||||||
|
}
|
||||||
|
|
||||||
|
#TODO Uncomment once terraform-provider-google/issues/9273 is fixed
|
||||||
|
# tags = ["ssh"]
|
||||||
|
depends_on = [
|
||||||
|
google_project_iam_member.shared_vpc,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue