add: uptime-kuma and workflow files (#1)

* add: uptime-kuma and workflow files

* imp: use latest uptime kuma for better DB management

* ref(deploy): allow an external `mariadb` database

* fix(db): patch `knex_init_db.js` file

* fix(runtime): avoid spawning zombie processes

* chore: do not commit `trunk` linting confs

* fix(actions): permissions

* imp(deploy): use secrets from GCP secret manager
Gustavo Valverde committed 2024-07-15 10:53:05 +01:00 (via GitHub)
parent 7dac323258 · commit 16a3f21f39
15 changed files with 1291 additions and 0 deletions

.github/dependabot.yml (new file)
@@ -0,0 +1,19 @@
version: 2
updates:
- package-ecosystem: docker
directory: /
schedule:
interval: monthly
commit-message:
prefix: "deps(docker) "
- package-ecosystem: github-actions
directory: /
schedule:
interval: monthly
commit-message:
prefix: "deps(actions) "
groups:
devops:
patterns:
- "*"

.github/workflows/cd-deploy-to-dev.yml (new file)
@@ -0,0 +1,61 @@
name: Deploy to dev
on:
pull_request:
types: [opened, synchronize, reopened, labeled]
paths:
- '**/Dockerfile'
- 'scripts/**'
- 'etc/litestream.yml'
- .github/workflows/cd-deploy-to-dev.yml
- .github/workflows/sub-cloudrun-deploy.yml
concurrency:
# Ensures that only one workflow run executes at a time per group. Builds
# already in progress are cancelled; only the latest commit is allowed to run,
# cancelling any workflow runs in between
group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
actions: read
attestations: read
checks: read
contents: read
deployments: read
id-token: write
issues: read
discussions: read
packages: read
pages: read
pull-requests: read
repository-projects: read
security-events: read
statuses: read
jobs:
build:
uses: ./.github/workflows/sub-build-docker-image.yml
with:
environment: dev
dockerfile_path: ./docker/Dockerfile
dockerfile_target: runner
app_name: ${{ vars.APP_NAME }}
registry: ${{ vars.GAR_BASE }}
secrets: inherit
deploy:
needs: [build]
uses: ./.github/workflows/sub-cloudrun-deploy.yml
with:
environment: dev
project_id: ${{ vars.GCP_PROJECT }}
region: ${{ vars.GCP_REGION }}
app_name: ${{ vars.APP_NAME }}
registry: ${{ vars.GAR_BASE }}
image_digest: ${{ needs.build.outputs.image_digest }}
min_instances: '0'
max_instances: '30'
cpu: '1'
memory: 1Gi
secrets: inherit

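Note: the dev, test, and prod deploy workflows below differ only in trigger and scaling values, and all delegate to the two sub-workflows. A quick local sanity check of the whole call graph, assuming the third-party actionlint CLI is installed:

    actionlint .github/workflows/cd-deploy-to-*.yml .github/workflows/sub-*.yml
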
.github/workflows/cd-deploy-to-prod.yml (new file)
@@ -0,0 +1,57 @@
name: Deploy to prod
on:
release:
types:
- published
concurrency:
# Ensures that only one workflow run executes at a time per group. Builds
# already in progress are cancelled; only the latest commit is allowed to run,
# cancelling any workflow runs in between
group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
actions: read
attestations: read
checks: read
contents: read
deployments: read
id-token: write
issues: read
discussions: read
packages: read
pages: read
pull-requests: read
repository-projects: read
security-events: read
statuses: read
jobs:
build:
# needs: [test]
uses: ./.github/workflows/sub-build-docker-image.yml
with:
environment: prod
dockerfile_path: ./docker/Dockerfile
dockerfile_target: runner
app_name: ${{ vars.APP_NAME }}
registry: ${{ vars.GAR_BASE }}
secrets: inherit
deploy:
needs: [build]
uses: ./.github/workflows/sub-cloudrun-deploy.yml
with:
environment: prod
project_id: ${{ vars.GCP_PROJECT }}
region: ${{ vars.GCP_REGION }}
app_name: ${{ vars.APP_NAME }}
registry: ${{ vars.GAR_BASE }}
image_digest: ${{ needs.build.outputs.image_digest }}
min_instances: '1'
max_instances: '10'
cpu: '1'
memory: 1Gi
secrets: inherit

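The prod workflow only runs when a release is published; for example, cutting a release with the GitHub CLI triggers both the build and the deploy (the tag name is illustrative):

    gh release create v1.23.13 --title "v1.23.13" --generate-notes
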
.github/workflows/cd-deploy-to-test.yml (new file)
@@ -0,0 +1,62 @@
name: Deploy to test
on:
push:
branches:
- main
paths:
- '**/Dockerfile'
- 'scripts/**'
- 'etc/litestream.yml'
- .github/workflows/cd-deploy-to-test.yml
- .github/workflows/sub-cloudrun-deploy.yml
concurrency:
# Ensures that only one workflow run executes at a time per group. Builds
# already in progress are cancelled; only the latest commit is allowed to run,
# cancelling any workflow runs in between
group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
actions: read
attestations: read
checks: read
contents: read
deployments: read
id-token: write
issues: read
discussions: read
packages: read
pages: read
pull-requests: read
repository-projects: read
security-events: read
statuses: read
jobs:
build:
uses: ./.github/workflows/sub-build-docker-image.yml
with:
environment: test
dockerfile_path: ./docker/Dockerfile
dockerfile_target: runner
app_name: ${{ vars.APP_NAME }}
registry: ${{ vars.GAR_BASE }}
secrets: inherit
deploy:
needs: [build]
uses: ./.github/workflows/sub-cloudrun-deploy.yml
with:
environment: test
project_id: ${{ vars.GCP_PROJECT }}
region: ${{ vars.GCP_REGION }}
app_name: ${{ vars.APP_NAME }}
registry: ${{ vars.GAR_BASE }}
image_digest: ${{ needs.build.outputs.image_digest }}
min_instances: '0'
max_instances: '30'
cpu: '1'
memory: 1Gi
secrets: inherit

.github/workflows/chore-clean-dev.yml (new file)
@@ -0,0 +1,35 @@
name: Clean dev instances
on:
delete:
pull_request:
branches:
- main
types:
- closed
permissions: read-all
jobs:
delete:
runs-on: ubuntu-latest
permissions:
contents: 'read'
id-token: 'write'
steps:
- name: Inject slug/short variables
uses: rlespinasse/github-slug-action@v4.5.0
- name: Authenticate to Google Cloud
id: auth
uses: google-github-actions/auth@v2.1.3
with:
workload_identity_provider: '${{ vars.GCP_WIF }}'
project_id: '${{ vars.GCP_PROJECT }}'
- name: Set up Cloud SDK
uses: google-github-actions/setup-gcloud@v2.1.0
- name: Removing CR service
run: |
gcloud run services delete ${{ vars.APP_NAME }}-${{ env.GITHUB_HEAD_REF_SLUG || env.GITHUB_REF_SLUG }} --region=${{ vars.GOOGLE_CLOUD_REGION }} --quiet

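For reference, github-slug-action lower-cases the ref and replaces any non-alphanumeric characters with hyphens, so a PR branch such as feat/Add_Monitor yields GITHUB_HEAD_REF_SLUG=feat-add-monitor and the step removes a service named like the following (app name and region are illustrative):

    gcloud run services delete uptime-kuma-feat-add-monitor --region=us-east1 --quiet
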
@@ -0,0 +1,18 @@
name: Lint Code Base
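# No-op twin of the real "Lint Code Base" workflow: it matches exactly the
# paths the real one ignores, so a required lint status check can still pass
# on PRs that don't touch lintable files.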
on:
pull_request:
branches: [main]
paths-ignore:
- '**/Dockerfile'
- 'scripts/**'
- 'etc/litestream.yml'
- .github/workflows/ci-lint-codebase.yml
permissions: read-all
jobs:
linter:
runs-on: ubuntu-latest
steps:
- run: echo "Job not required"

.github/workflows/ci-lint-codebase.yml (new file)
@@ -0,0 +1,57 @@
name: Lint Code Base
on:
pull_request:
branches: [main]
paths:
- '**/Dockerfile'
- 'scripts/**'
- 'etc/litestream.yml'
- .github/workflows/ci-lint-codebase.yml
push:
branches: [main]
paths:
- '**.sh*'
- '**.ts*'
- Dockerfile
- package.json
- pnpm-lock.yaml
- .github/workflows/ci-lint-codebase.yml
concurrency:
# Ensures that only one workflow run executes at a time per group. Builds
# already in progress are cancelled; only the latest commit is allowed to run,
# cancelling any workflow runs in between
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions: read-all
jobs:
linter:
runs-on: ubuntu-latest
steps:
- name: Checkout Code Repository
uses: actions/checkout@v4.1.7
with:
# Full git history is needed to get a proper
# list of changed files within `super-linter`
fetch-depth: 0
- name: Lint Code Base
uses: super-linter/super-linter/slim@v6.7.0
env:
LOG_LEVEL: ERROR
VALIDATE_ALL_CODEBASE: false
VALIDATE_SHELL_SHFMT: false
VALIDATE_JSCPD: false
VALIDATE_CSS: false
VALIDATE_EDITORCONFIG: false
VALIDATE_MARKDOWN: false
VALIDATE_JAVASCRIPT_ES: false
VALIDATE_JAVASCRIPT_STANDARD: false
VALIDATE_DOCKERFILE_HADOLINT: false
LINTER_RULES_PATH: /
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

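super-linter can also be run locally in the same slim image before pushing; RUN_LOCAL and the /tmp/lint mount point are documented super-linter options:

    docker run --rm \
      -e RUN_LOCAL=true \
      -e VALIDATE_ALL_CODEBASE=false \
      -v "$(pwd)":/tmp/lint \
      ghcr.io/super-linter/super-linter/slim:v6.7.0
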
.github/workflows/sub-build-docker-image.yml (new file)
@@ -0,0 +1,119 @@
name: Build docker image
on:
workflow_call:
inputs:
app_name:
required: true
type: string
dockerfile_path:
required: true
type: string
dockerfile_target:
required: true
type: string
registry:
required: true
type: string
environment:
required: true
type: string
outputs:
image_digest:
description: The image digest to be used on a caller workflow
value: ${{ jobs.build.outputs.image_digest }}
permissions: read-all
jobs:
build:
name: Build images
timeout-minutes: 15
runs-on: ubuntu-latest
outputs:
image_digest: ${{ steps.docker_build.outputs.digest }}
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v4.1.7
with:
persist-credentials: false
- name: Inject slug/short variables
uses: rlespinasse/github-slug-action@v4.5.0
with:
short-length: 7
# Automatic tag management and OCI Image Format Specification for labels
- name: Docker meta
id: meta
uses: docker/metadata-action@v5.5.1
with:
# list of Docker images to use as base name for tags
images: |
${{ inputs.registry }}/${{ inputs.app_name }}
# generate Docker tags based on the following events/attributes
tags: |
type=schedule
# semver and ref,tag automatically add a "latest" tag, but only on stable releases
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=ref,event=tag
type=ref,event=branch
type=ref,event=pr
type=sha
# edge is the latest commit on the default branch.
type=edge,enable={{is_default_branch}}
# Setup Docker Buildx to allow use of docker cache layers from GH
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3.4.0
- name: Authenticate to Google Cloud
id: auth
uses: google-github-actions/auth@v2.1.3
with:
workload_identity_provider: '${{ vars.GCP_WIF }}'
service_account: '${{ vars.GCP_ARTIFACTS_SA }}'
token_format: 'access_token'
# Google's default access token lifetime is 1 hour (3600s), but some builds
# can take longer than that, so we increase it to 3 hours (10800s).
access_token_lifetime: 10800s
- name: Login to Google Artifact Registry
uses: docker/login-action@v3.2.0
with:
registry: us-docker.pkg.dev
username: oauth2accesstoken
password: ${{ steps.auth.outputs.access_token }}
# Build and push image to Google Artifact Registry, and possibly DockerHub
- name: Build & push
id: docker_build
uses: docker/build-push-action@v6.3.0
with:
target: ${{ inputs.dockerfile_target }}
context: .
file: ${{ inputs.dockerfile_path }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
push: true
# To improve build speeds, for each branch we push an additional image to the
# registry to be used as a caching layer, using the `min` caching mode so only
# the final-stage layers are exported.
#
# We use multiple cache sources to increase the chance of a cache hit: a
# per-branch cache is tried first, falling back to the `main` branch cache if
# there is no hit. Changes added to a PR are usually smaller than the diff
# between the PR and `main`, so this ordering gives the best performance.
#
# The caches are tried in top-down order; the first available cache is used:
# https://github.com/moby/moby/pull/26839#issuecomment-277383550
cache-from: |
type=registry,ref=${{ inputs.registry }}/${{ inputs.app_name }}:${{ env.GITHUB_REF_SLUG_URL }}-cache
type=registry,ref=${{ inputs.registry }}/${{ inputs.app_name }}:${{ github.event.repository.default_branch }}-cache
cache-to: |
type=registry,ref=${{ inputs.registry }}/${{ inputs.app_name }}:${{ env.GITHUB_REF_SLUG_URL }}-cache,mode=min

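A rough local equivalent of the Build & push step, useful for debugging cache behavior (the registry path and app name are placeholders for the GAR values the caller injects):

    docker buildx build \
      --target runner \
      --file docker/Dockerfile \
      --cache-from type=registry,ref=us-docker.pkg.dev/PROJECT/REPO/uptime-kuma:main-cache \
      --tag uptime-kuma:local \
      .
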
.github/workflows/sub-cloudrun-deploy.yml (new file)
@@ -0,0 +1,130 @@
name: Deploy to Cloud Run
on:
workflow_call:
inputs:
app_name:
required: true
type: string
registry:
required: true
type: string
image_digest:
required: true
type: string
description: The image digest to deploy
project_id:
required: false
type: string
description: The project to deploy to
region:
required: true
type: string
description: The region to deploy to
environment:
required: false
type: string
description: The environment to deploy to
min_instances:
required: false
type: string
description: The minimum number of instances to deploy
max_instances:
required: false
type: string
description: The maximum number of instances to deploy
cpu:
required: false
type: string
description: The number of CPUs to use for the service
memory:
required: false
type: string
description: The amount of memory to use for the service
permissions: read-all
jobs:
versioning:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.set.outputs.version }}
steps:
- name: Getting API Version
id: get
uses: actions/github-script@v7
if: ${{ github.event_name == 'release' }}
with:
result-encoding: string
script: |
return context.payload.release.tag_name.substring(0,2)
- name: Setting API Version
id: set
run: echo "version=${{ steps.get.outputs.result }}" >> "$GITHUB_OUTPUT"
deploy:
name: Deploy to Cloud Run
needs: [versioning]
timeout-minutes: 10
runs-on: ubuntu-latest
environment:
name: ${{ inputs.environment }}
url: ${{ steps.deploy.outputs.url }}
permissions:
contents: read
id-token: write
steps:
- name: Inject slug/short variables
uses: rlespinasse/github-slug-action@v4.5.0
- uses: actions/checkout@v4.1.7
with:
persist-credentials: false
- name: Authenticate to Google Cloud
id: auth
uses: google-github-actions/auth@v2.1.3
with:
workload_identity_provider: '${{ vars.GCP_WIF }}'
project_id: '${{ vars.GCP_PROJECT }}'
- name: Set up Cloud SDK
uses: google-github-actions/setup-gcloud@v2.1.0
- name: Deploy to cloud run
id: deploy
uses: google-github-actions/deploy-cloudrun@v2.6.0
with:
service: ${{ inputs.app_name }}-${{ needs.versioning.outputs.version || env.GITHUB_HEAD_REF_SLUG || inputs.environment }}
image: ${{ inputs.registry }}/${{ inputs.app_name }}@${{ inputs.image_digest }}
region: ${{ inputs.region }}
gcloud_component: alpha
env_vars: |
REPLICA_URL=${{ vars.REPLICA_URL }}
UPTIME_KUMA_DB_TYPE=${{ vars.UPTIME_KUMA_DB_TYPE }}
UPTIME_KUMA_DB_HOSTNAME=${{ vars.UPTIME_KUMA_DB_HOSTNAME }}
UPTIME_KUMA_DB_PORT=${{ vars.UPTIME_KUMA_DB_PORT }}
UPTIME_KUMA_DB_NAME=${{ vars.UPTIME_KUMA_DB_NAME }}
UPTIME_KUMA_DB_USERNAME=${{ vars.UPTIME_KUMA_DB_USERNAME }}
env_vars_update_strategy: overwrite
secrets: |
UPTIME_KUMA_DB_PASSWORD=UPTIME_KUMA_DB_PASSWORD:latest
flags: |
--min-instances=${{ inputs.min_instances }}
--max-instances=${{ inputs.max_instances }}
--cpu=${{ inputs.cpu }}
--memory=${{ inputs.memory }}
--service-account=${{ vars.GCP_BUCKET_SA }}
--set-cloudsql-instances=${{ vars.CLOUDSQL_INSTANCE }}
--add-volume=name=files,type=in-memory
--add-volume-mount=volume=files,mount-path=/app/data
--network=projects/zfnd-dev-net-spoke-0/global/networks/dev-spoke-0
--subnet=projects/zfnd-dev-net-spoke-0/regions/us-east1/subnetworks/dev-default-ue1
- name: Allow unauthenticated calls to the service
run: |
gcloud run services add-iam-policy-binding ${{ inputs.app_name }}-${{ needs.versioning.outputs.version || env.GITHUB_HEAD_REF_SLUG || inputs.environment }} \
--region=${{ inputs.region }} --member=allUsers --role=roles/run.invoker --quiet
- name: Test service with cURL
run: curl "${{ steps.deploy.outputs.url }}"

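The versioning job keeps only the first two characters of the release tag (tag_name.substring(0,2)), so a release tagged v1.2.3 deploys a service suffixed -v1; PR deploys fall back to the branch slug, and pushes fall back to the environment name. A hedged check of the resulting service URL (names are illustrative):

    gcloud run services describe uptime-kuma-v1 --region=us-east1 \
      --format='value(status.url)'
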
.gitignore
@@ -1,3 +1,5 @@
.trunk
# Created by https://www.toptal.com/developers/gitignore/api/windows,linux,macos,visualstudiocode,jetbrains,node,nextjs,vercel,amplify
# Edit at https://www.toptal.com/developers/gitignore?templates=windows,linux,macos,visualstudiocode,jetbrains,node,nextjs,vercel,amplify

docker/Dockerfile (new file)
@@ -0,0 +1,46 @@
# syntax=docker/dockerfile:1
# ===================== Create base stage =====================
ARG NODE_VERSION=lts
ARG UPTIME_KUMA_VERSION=nightly2
ARG APP_HOME=/app
FROM louislam/uptime-kuma:${UPTIME_KUMA_VERSION} AS base
ARG PORT=3001
ARG APP_HOME
ENV APP_HOME=${APP_HOME}
WORKDIR ${APP_HOME}
# ==== App specific variables
ENV UPTIME_KUMA_IS_CONTAINER=1
ARG DATA_DIR=./data/
ENV DATA_DIR=${DATA_DIR}
ENV DB_PATH=${DATA_DIR}kuma.db
# ===================== App Runner Stage =====================
FROM base AS runner
# Copy all necessary files
COPY --from=litestream/litestream:0.3.13 /usr/local/bin/litestream /usr/local/bin/litestream
# Create the data directory, since some services won't mount one (even though
# it will often be mounted over at runtime).
RUN mkdir -p "${DATA_DIR}"
EXPOSE ${PORT}
ENV PORT=${PORT}
HEALTHCHECK --interval=60s --timeout=30s --start-period=180s --retries=5 CMD extra/healthcheck
# Copy the Litestream configuration file, startup script, and patched DB files.
COPY etc/litestream.yml /etc/litestream.yml
COPY scripts/run.sh /scripts/run.sh
COPY scripts/db/2024-07-11-0000-dns-results.js ./db/2024-07-11-0000-dns-results.js
COPY scripts/db/knex_init_db.js ./db/knex_init_db.js
USER node
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD [ "/scripts/run.sh" ]

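Running dumb-init as PID 1 is what forwards signals and reaps orphaned children (the zombie-process fix mentioned in the commit message). A quick sketch to verify it, assuming the image is built locally as uptime-kuma:local and ps is available inside it:

    docker run -d --name kuma-smoke uptime-kuma:local
    docker exec kuma-smoke ps -o pid,comm   # PID 1 should be dumb-init
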
etc/litestream.yml (new file)
@@ -0,0 +1,4 @@
dbs:
- path: ${DB_PATH}
replicas:
- url: ${REPLICA_URL}

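Litestream expands ${DB_PATH} and ${REPLICA_URL} from the environment when it reads this file, so one config works across environments. Example values for a local run (the bucket name is a placeholder):

    export DB_PATH=/app/data/kuma.db
    export REPLICA_URL=gcs://my-replica-bucket/uptime-kuma
    litestream replicate -config /etc/litestream.yml
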
scripts/db/2024-07-11-0000-dns-results.js (new file)
@@ -0,0 +1,11 @@
exports.up = function (knex) {
return knex.schema.table('monitor', function (table) {
table.string('dns_last_result', 2000).alter();
});
};
exports.down = function (knex) {
return knex.schema.table('monitor', function (table) {
table.string('dns_last_result', 255).alter();
});
};

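This migration widens monitor.dns_last_result from 255 to 2000 characters on upgrade and narrows it back on rollback. A hypothetical way to confirm it applied against the external MariaDB, reusing the deploy-time variables:

    mysql --host="$UPTIME_KUMA_DB_HOSTNAME" --port="$UPTIME_KUMA_DB_PORT" \
      --user="$UPTIME_KUMA_DB_USERNAME" --password \
      --execute "SHOW COLUMNS FROM monitor LIKE 'dns_last_result';" \
      "$UPTIME_KUMA_DB_NAME"
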
scripts/db/knex_init_db.js (new file)
@@ -0,0 +1,654 @@
const { R } = require('redbean-node');
const { log } = require('../src/util');
/**
* DO NOT ADD ANYTHING HERE!
* IF YOU NEED TO ADD FIELDS, ADD THEM TO ./db/knex_migrations
* See ./db/knex_migrations/README.md for more information
* @returns {Promise<void>}
*/
async function createTables() {
log.info('mariadb', 'Creating basic tables for MariaDB');
const knex = R.knex;
// TODO: Should check later if it is really the final patch sql file.
// docker_host
await knex.schema.createTable('docker_host', (table) => {
table.increments('id');
table.integer('user_id').unsigned().notNullable();
table.string('docker_daemon', 255);
table.string('docker_type', 255);
table.string('name', 255);
});
// group
await knex.schema.createTable('group', (table) => {
table.increments('id');
table.string('name', 255).notNullable();
table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
table.boolean('public').notNullable().defaultTo(false);
table.boolean('active').notNullable().defaultTo(true);
table.integer('weight').notNullable().defaultTo(1000);
table.integer('status_page_id').unsigned();
});
// proxy
await knex.schema.createTable('proxy', (table) => {
table.increments('id');
table.integer('user_id').unsigned().notNullable();
table.string('protocol', 10).notNullable();
table.string('host', 255).notNullable();
table.smallint('port').notNullable(); // TODO: Maybe an issue with MariaDB, may need a migration to int
table.boolean('auth').notNullable();
table.string('username', 255).nullable();
table.string('password', 255).nullable();
table.boolean('active').notNullable().defaultTo(true);
table.boolean('default').notNullable().defaultTo(false);
table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
table.index('user_id', 'proxy_user_id');
});
// user
await knex.schema.createTable('user', (table) => {
table.increments('id');
table
.string('username', 255)
.notNullable()
.unique()
.collate('utf8_general_ci');
table.string('password', 255);
table.boolean('active').notNullable().defaultTo(true);
table.string('timezone', 150);
table.string('twofa_secret', 64);
table.boolean('twofa_status').notNullable().defaultTo(false);
table.string('twofa_last_token', 6);
});
// monitor
await knex.schema.createTable('monitor', (table) => {
table.increments('id');
table.string('name', 150);
table.boolean('active').notNullable().defaultTo(true);
table
.integer('user_id')
.unsigned()
.references('id')
.inTable('user')
.onDelete('SET NULL')
.onUpdate('CASCADE');
table.integer('interval').notNullable().defaultTo(20);
table.text('url');
table.string('type', 20);
table.integer('weight').defaultTo(2000);
table.string('hostname', 255);
table.integer('port');
table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
table.string('keyword', 255);
table.integer('maxretries').notNullable().defaultTo(0);
table.boolean('ignore_tls').notNullable().defaultTo(false);
table.boolean('upside_down').notNullable().defaultTo(false);
table.integer('maxredirects').notNullable().defaultTo(10);
table
.text('accepted_statuscodes_json')
.notNullable()
.defaultTo('["200-299"]');
table.string('dns_resolve_type', 5);
table.string('dns_resolve_server', 255);
table.string('dns_last_result', 2000);
table.integer('retry_interval').notNullable().defaultTo(0);
table.string('push_token', 20).defaultTo(null);
table.text('method').notNullable().defaultTo('GET');
table.text('body').defaultTo(null);
table.text('headers').defaultTo(null);
table.text('basic_auth_user').defaultTo(null);
table.text('basic_auth_pass').defaultTo(null);
table
.integer('docker_host')
.unsigned()
.references('id')
.inTable('docker_host');
table.string('docker_container', 255);
table.integer('proxy_id').unsigned().references('id').inTable('proxy');
table.boolean('expiry_notification').defaultTo(true);
table.text('mqtt_topic');
table.string('mqtt_success_message', 255);
table.string('mqtt_username', 255);
table.string('mqtt_password', 255);
table.string('database_connection_string', 2000);
table.text('database_query');
table.string('auth_method', 250);
table.text('auth_domain');
table.text('auth_workstation');
table.string('grpc_url', 255).defaultTo(null);
table.text('grpc_protobuf').defaultTo(null);
table.text('grpc_body').defaultTo(null);
table.text('grpc_metadata').defaultTo(null);
table.text('grpc_method').defaultTo(null);
table.text('grpc_service_name').defaultTo(null);
table.boolean('grpc_enable_tls').notNullable().defaultTo(false);
table.string('radius_username', 255);
table.string('radius_password', 255);
table.string('radius_calling_station_id', 50);
table.string('radius_called_station_id', 50);
table.string('radius_secret', 255);
table.integer('resend_interval').notNullable().defaultTo(0);
table.integer('packet_size').notNullable().defaultTo(56);
table.string('game', 255);
});
// heartbeat
await knex.schema.createTable('heartbeat', (table) => {
table.increments('id');
table.boolean('important').notNullable().defaultTo(false);
table
.integer('monitor_id')
.unsigned()
.notNullable()
.references('id')
.inTable('monitor')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.smallint('status').notNullable();
table.text('msg');
table.datetime('time').notNullable();
table.integer('ping');
table.integer('duration').notNullable().defaultTo(0);
table.integer('down_count').notNullable().defaultTo(0);
table.index('important');
table.index(['monitor_id', 'time'], 'monitor_time_index');
table.index('monitor_id');
table.index(
['monitor_id', 'important', 'time'],
'monitor_important_time_index'
);
});
// incident
await knex.schema.createTable('incident', (table) => {
table.increments('id');
table.string('title', 255).notNullable();
table.text('content', 255).notNullable();
table.string('style', 30).notNullable().defaultTo('warning');
table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
table.datetime('last_updated_date');
table.boolean('pin').notNullable().defaultTo(true);
table.boolean('active').notNullable().defaultTo(true);
table.integer('status_page_id').unsigned();
});
// maintenance
await knex.schema.createTable('maintenance', (table) => {
table.increments('id');
table.string('title', 150).notNullable();
table.text('description').notNullable();
table
.integer('user_id')
.unsigned()
.references('id')
.inTable('user')
.onDelete('SET NULL')
.onUpdate('CASCADE');
table.boolean('active').notNullable().defaultTo(true);
table.string('strategy', 50).notNullable().defaultTo('single');
table.datetime('start_date');
table.datetime('end_date');
table.time('start_time');
table.time('end_time');
table.string('weekdays', 250).defaultTo('[]');
table.text('days_of_month').defaultTo('[]');
table.integer('interval_day');
table.index('active');
table.index(['strategy', 'active'], 'manual_active');
table.index('user_id', 'maintenance_user_id');
});
// status_page
await knex.schema.createTable('status_page', (table) => {
table.increments('id');
table.string('slug', 255).notNullable().unique().collate('utf8_general_ci');
table.string('title', 255).notNullable();
table.text('description');
table.string('icon', 255).notNullable();
table.string('theme', 30).notNullable();
table.boolean('published').notNullable().defaultTo(true);
table.boolean('search_engine_index').notNullable().defaultTo(true);
table.boolean('show_tags').notNullable().defaultTo(false);
table.string('password');
table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
table.datetime('modified_date').notNullable().defaultTo(knex.fn.now());
table.text('footer_text');
table.text('custom_css');
table.boolean('show_powered_by').notNullable().defaultTo(true);
table.string('google_analytics_tag_id');
});
// maintenance_status_page
await knex.schema.createTable('maintenance_status_page', (table) => {
table.increments('id');
table
.integer('status_page_id')
.unsigned()
.notNullable()
.references('id')
.inTable('status_page')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table
.integer('maintenance_id')
.unsigned()
.notNullable()
.references('id')
.inTable('maintenance')
.onDelete('CASCADE')
.onUpdate('CASCADE');
});
// maintenance_timeslot
await knex.schema.createTable('maintenance_timeslot', (table) => {
table.increments('id');
table
.integer('maintenance_id')
.unsigned()
.notNullable()
.references('id')
.inTable('maintenance')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.datetime('start_date').notNullable();
table.datetime('end_date');
table.boolean('generated_next').defaultTo(false);
table.index('maintenance_id');
table.index(
['maintenance_id', 'start_date', 'end_date'],
'active_timeslot_index'
);
table.index('generated_next', 'generated_next_index');
});
// monitor_group
await knex.schema.createTable('monitor_group', (table) => {
table.increments('id');
table
.integer('monitor_id')
.unsigned()
.notNullable()
.references('id')
.inTable('monitor')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table
.integer('group_id')
.unsigned()
.notNullable()
.references('id')
.inTable('group')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.integer('weight').notNullable().defaultTo(1000);
table.boolean('send_url').notNullable().defaultTo(false);
table.index(['monitor_id', 'group_id'], 'fk');
});
// monitor_maintenance
await knex.schema.createTable('monitor_maintenance', (table) => {
table.increments('id');
table
.integer('monitor_id')
.unsigned()
.notNullable()
.references('id')
.inTable('monitor')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table
.integer('maintenance_id')
.unsigned()
.notNullable()
.references('id')
.inTable('maintenance')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.index('maintenance_id', 'maintenance_id_index2');
table.index('monitor_id', 'monitor_id_index');
});
// notification
await knex.schema.createTable('notification', (table) => {
table.increments('id');
table.string('name', 255);
table.boolean('active').notNullable().defaultTo(true);
table.integer('user_id').unsigned();
table.boolean('is_default').notNullable().defaultTo(false);
table.text('config', 'longtext');
});
// monitor_notification
await knex.schema.createTable('monitor_notification', (table) => {
table.increments('id').unsigned(); // TODO: no auto increment????
table
.integer('monitor_id')
.unsigned()
.notNullable()
.references('id')
.inTable('monitor')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table
.integer('notification_id')
.unsigned()
.notNullable()
.references('id')
.inTable('notification')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.index(
['monitor_id', 'notification_id'],
'monitor_notification_index'
);
});
// tag
await knex.schema.createTable('tag', (table) => {
table.increments('id');
table.string('name', 255).notNullable();
table.string('color', 255).notNullable();
table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
});
// monitor_tag
await knex.schema.createTable('monitor_tag', (table) => {
table.increments('id');
table
.integer('monitor_id')
.unsigned()
.notNullable()
.references('id')
.inTable('monitor')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table
.integer('tag_id')
.unsigned()
.notNullable()
.references('id')
.inTable('tag')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.text('value');
});
// monitor_tls_info
await knex.schema.createTable('monitor_tls_info', (table) => {
table.increments('id');
table
.integer('monitor_id')
.unsigned()
.notNullable()
.references('id')
.inTable('monitor')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.text('info_json');
});
// notification_sent_history
await knex.schema.createTable('notification_sent_history', (table) => {
table.increments('id');
table.string('type', 50).notNullable();
table.integer('monitor_id').unsigned().notNullable();
table.integer('days').notNullable();
table.unique(['type', 'monitor_id', 'days']);
table.index(['type', 'monitor_id', 'days'], 'good_index');
});
// setting
await knex.schema.createTable('setting', (table) => {
table.increments('id');
table.string('key', 200).notNullable().unique().collate('utf8_general_ci');
table.text('value');
table.string('type', 20);
});
// status_page_cname
await knex.schema.createTable('status_page_cname', (table) => {
table.increments('id');
table
.integer('status_page_id')
.unsigned()
.references('id')
.inTable('status_page')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.string('domain').notNullable().unique().collate('utf8_general_ci');
});
/*********************
* Converted Patch here
*********************/
// 2023-06-30-1348-http-body-encoding.js
// ALTER TABLE monitor ADD http_body_encoding VARCHAR(25);
// UPDATE monitor SET http_body_encoding = 'json' WHERE (type = 'http' or type = 'keyword') AND http_body_encoding IS NULL;
await knex.schema.table('monitor', function (table) {
table.string('http_body_encoding', 25);
});
await knex('monitor')
.where(function () {
this.where('type', 'http').orWhere('type', 'keyword');
})
.whereNull('http_body_encoding')
.update({
http_body_encoding: 'json',
});
// 2023-06-30-1354-add-description-monitor.js
// ALTER TABLE monitor ADD description TEXT default null;
await knex.schema.table('monitor', function (table) {
table.text('description').defaultTo(null);
});
// 2023-06-30-1357-api-key-table.js
/*
CREATE TABLE [api_key] (
[id] INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
[key] VARCHAR(255) NOT NULL,
[name] VARCHAR(255) NOT NULL,
[user_id] INTEGER NOT NULL,
[created_date] DATETIME DEFAULT (DATETIME('now')) NOT NULL,
[active] BOOLEAN DEFAULT 1 NOT NULL,
[expires] DATETIME DEFAULT NULL,
CONSTRAINT FK_user FOREIGN KEY ([user_id]) REFERENCES [user]([id]) ON DELETE CASCADE ON UPDATE CASCADE
);
*/
await knex.schema.createTable('api_key', function (table) {
table.increments('id').primary();
table.string('key', 255).notNullable();
table.string('name', 255).notNullable();
table
.integer('user_id')
.unsigned()
.notNullable()
.references('id')
.inTable('user')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.dateTime('created_date').defaultTo(knex.fn.now()).notNullable();
table.boolean('active').defaultTo(1).notNullable();
table.dateTime('expires').defaultTo(null);
});
// 2023-06-30-1400-monitor-tls.js
/*
ALTER TABLE monitor
ADD tls_ca TEXT default null;
ALTER TABLE monitor
ADD tls_cert TEXT default null;
ALTER TABLE monitor
ADD tls_key TEXT default null;
*/
await knex.schema.table('monitor', function (table) {
table.text('tls_ca').defaultTo(null);
table.text('tls_cert').defaultTo(null);
table.text('tls_key').defaultTo(null);
});
// 2023-06-30-1401-maintenance-cron.js
/*
-- 999 characters. https://stackoverflow.com/questions/46134830/maximum-length-for-cron-job
DROP TABLE maintenance_timeslot;
ALTER TABLE maintenance ADD cron TEXT;
ALTER TABLE maintenance ADD timezone VARCHAR(255);
ALTER TABLE maintenance ADD duration INTEGER;
*/
await knex.schema
.dropTableIfExists('maintenance_timeslot')
.table('maintenance', function (table) {
table.text('cron');
table.string('timezone', 255);
table.integer('duration');
});
// 2023-06-30-1413-add-parent-monitor.js.
/*
ALTER TABLE monitor
ADD parent INTEGER REFERENCES [monitor] ([id]) ON DELETE SET NULL ON UPDATE CASCADE;
*/
await knex.schema.table('monitor', function (table) {
table
.integer('parent')
.unsigned()
.references('id')
.inTable('monitor')
.onDelete('SET NULL')
.onUpdate('CASCADE');
});
/*
patch-add-invert-keyword.sql
ALTER TABLE monitor
ADD invert_keyword BOOLEAN default 0 not null;
*/
await knex.schema.table('monitor', function (table) {
table.boolean('invert_keyword').defaultTo(0).notNullable();
});
/*
patch-added-json-query.sql
ALTER TABLE monitor
ADD json_path TEXT;
ALTER TABLE monitor
ADD expected_value VARCHAR(255);
*/
await knex.schema.table('monitor', function (table) {
table.text('json_path');
table.string('expected_value', 255);
});
/*
patch-added-kafka-producer.sql
ALTER TABLE monitor
ADD kafka_producer_topic VARCHAR(255);
ALTER TABLE monitor
ADD kafka_producer_brokers TEXT;
ALTER TABLE monitor
ADD kafka_producer_ssl INTEGER;
ALTER TABLE monitor
ADD kafka_producer_allow_auto_topic_creation VARCHAR(255);
ALTER TABLE monitor
ADD kafka_producer_sasl_options TEXT;
ALTER TABLE monitor
ADD kafka_producer_message TEXT;
*/
await knex.schema.table('monitor', function (table) {
table.string('kafka_producer_topic', 255);
table.text('kafka_producer_brokers');
// patch-fix-kafka-producer-booleans.sql
table.boolean('kafka_producer_ssl').defaultTo(0).notNullable();
table
.boolean('kafka_producer_allow_auto_topic_creation')
.defaultTo(0)
.notNullable();
table.text('kafka_producer_sasl_options');
table.text('kafka_producer_message');
});
/*
patch-add-certificate-expiry-status-page.sql
ALTER TABLE status_page
ADD show_certificate_expiry BOOLEAN default 0 NOT NULL;
*/
await knex.schema.table('status_page', function (table) {
table.boolean('show_certificate_expiry').defaultTo(0).notNullable();
});
/*
patch-monitor-oauth-cc.sql
ALTER TABLE monitor
ADD oauth_client_id TEXT default null;
ALTER TABLE monitor
ADD oauth_client_secret TEXT default null;
ALTER TABLE monitor
ADD oauth_token_url TEXT default null;
ALTER TABLE monitor
ADD oauth_scopes TEXT default null;
ALTER TABLE monitor
ADD oauth_auth_method TEXT default null;
*/
await knex.schema.table('monitor', function (table) {
table.text('oauth_client_id').defaultTo(null);
table.text('oauth_client_secret').defaultTo(null);
table.text('oauth_token_url').defaultTo(null);
table.text('oauth_scopes').defaultTo(null);
table.text('oauth_auth_method').defaultTo(null);
});
/*
patch-add-timeout-monitor.sql
ALTER TABLE monitor
ADD timeout DOUBLE default 0 not null;
*/
await knex.schema.table('monitor', function (table) {
table.double('timeout').defaultTo(0).notNullable();
});
/*
patch-add-gamedig-given-port.sql
ALTER TABLE monitor
ADD gamedig_given_port_only BOOLEAN default 1 not null;
*/
await knex.schema.table('monitor', function (table) {
table.boolean('gamedig_given_port_only').defaultTo(1).notNullable();
});
log.info('mariadb', 'Created basic tables for MariaDB');
}
module.exports = {
createTables,
};

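These tables are only created when Uptime Kuma is pointed at MariaDB. A sketch of running the container against an external database using the same variables the deploy workflow sets (the image name and all values are placeholders):

    docker run -d -p 3001:3001 \
      -e UPTIME_KUMA_DB_TYPE=mariadb \
      -e UPTIME_KUMA_DB_HOSTNAME=10.0.0.5 \
      -e UPTIME_KUMA_DB_PORT=3306 \
      -e UPTIME_KUMA_DB_NAME=kuma \
      -e UPTIME_KUMA_DB_USERNAME=kuma \
      -e UPTIME_KUMA_DB_PASSWORD=change-me \
      uptime-kuma:local
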
scripts/run.sh (new executable file)
@@ -0,0 +1,16 @@
#!/bin/bash
set -e
if [[ "${UPTIME_KUMA_DB_TYPE}" == 'mariadb' ]]; then
# MariaDB handles durability itself, so skip Litestream and run the server directly.
exec node server/server.js
else
# Restore the database if it does not already exist.
if [[ -f "${DB_PATH}" ]]; then
echo "Database already exists, skipping restore"
else
echo "No database found, restoring from replica if exists"
litestream restore -if-replica-exists -o "${DB_PATH}" "${REPLICA_URL}"
fi
# Run litestream with your app as the subprocess.
exec litestream replicate -exec "node server/server.js"
fi
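A smoke test of the restore branch outside the container, assuming litestream is installed and the replica URL is a placeholder:

    export REPLICA_URL=gcs://my-replica-bucket/uptime-kuma
    litestream restore -if-replica-exists -o /tmp/kuma.db "$REPLICA_URL"
    ls -l /tmp/kuma.db   # present only if a replica existed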