1
0
Fork 0
mirror of https://gitlab.com/bramw/baserow.git synced 2025-04-04 21:25:24 +00:00

Setup Baserow to build its images in CI for Gitlab.

This commit is contained in:
Nigel Gott 2022-01-18 09:35:45 +00:00
parent 0a090427ac
commit 241958a8ab
25 changed files with 2422 additions and 239 deletions

View file

@ -2,4 +2,6 @@
**/node_modules
**/.pytest_cache
*.iml
.idea/
web-frontend/reports/
backend/reports/

5
.gitignore vendored
View file

@ -96,6 +96,8 @@ venv/
web-frontend/plugins/
backend/plugins/
web-frontend/reports/
backend/reports/
.idea/
*.iml
@ -115,3 +117,6 @@ out/
vetur.config.js
formula/out/
.coverage
junit.xml

View file

@ -1,70 +1,599 @@
before_script:
- apt-get update && apt-get -y install make curl gnupg2
# == Summary of Baserow's CI workflow:
#
# This file contains the gitlab CI job definitions that build and test Baserow
# automatically.
#
# === Overview of how Baserow uses git branches
#
# * `develop` is the branch we merge newly developed features onto from feature
# branches.
# * a feature branch is a branch made starting off `develop` containing a specific
# new feature, when finished it will be merged back onto `develop`.
# * `master` is the branch which contains official releases of Baserow, to do so we
# periodically merge the latest changes from `develop` onto `master` and then tag
# that new master commit with a git tag containing the version (1.8.2 etc).
#
# === How a new version of Baserow is released to Dockerhub
#
# A. Create an MR from develop to master and merge it.
# B. Wait for the merge commit pipeline to succeed on master, which will build and test
# the images.
# C. Tag the merge commit in the Gitlab GUI with the git tag being the Baserow version
# (1.8.2, 1.0, etc).
# D. Gitlab will make a new pipeline for the tag which will push the images built in
# step B to Dockerhub. If step B failed or has not completed yet then this pipeline
# will fail and not push anything.
#
# === What Gitlab CI steps are configured to run and when
#
# See below for the high level summary of the steps Gitlab will run to build, test and
# release Baserow images in various scenarios depending on the branches involved.
#
# ==== On the master branch - When MR Merged/commit pushed/branch made
#
# 1. The backend and web-frontend dev images will be built and pushed to the
# gitlab ci image repo.
# 1. A `{image_dev}:ci-latest-$CI_COMMIT_SHA` image is pushed for the next stages.
# 2. A `{image_dev}:ci-latest-$BRANCH_NAME` image is pushed to cache future runs.
# 2. The pushed `ci-latest-$CI_COMMIT_SHA` images will be tested and linted. If a
# previously successful test/lint run is found for the same/prev commit AND no
# files have changed which could possibly change the result this is skipped.
# 3. Cached from the `ci-latest-$CI_COMMIT_SHA` image the non-dev images will be built
# and then both the dev and non-dev images will be with tagged marking them as
# tested and pushed to the gitlab ci repo.
# 4. Trigger a pipeline in any downstream repos that depend on this one.
#
# ==== On the develop branch - When MR Merged/new commit pushed
#
# The build and testing steps 1, 2 and 3 from above are run first and then:
# 4. Push the tested images from step 3 to the Dockerhub repo under the
# `develop-latest` tag.
# 5. Trigger a pipeline in any downstream repos that depend on this one.
#
# ==== On feature branches - When MR Merged/new commit pushed
#
# The build and testing steps 1, 2 and 3 from above are run.
#
# ===== On the latest commit on master - When a Git tag is created
#
# This is done when we have merged the latest changes from develop on master, and we
# want to release them as a new version of Baserow. Gitlab will automatically detect
# the new git tag and only do the following:
#
# 1. Push the images built from step 3 above (or fail if they don't exist) to the
# Dockerhub repo with the tags:
# 1. `latest`
# 2. `${git tag}`
#
# ==== Older commit on master - When a Git tag is created
#
# 1. Push the images built from step 3 above (or fail if they don't exist) to the
# Dockerhub repo with the tags:
# 1. `${git tag}`
#
# ==== Any non-master commit - When a Git tag is created
#
# 1. Fail as only master commits should be tagged/released.
#
# == Cleanup
#
# Images with tags starting with `ci-latest` or `ci-tested` (made in steps 1. and 3.)
# will be deleted after they are 7 days old by a job that runs daily at 11AM CET.
# Pull in the shared "abstract" job templates (.build-baserow-image,
# .docker-image-test-stage, .skippable-job, ...) referenced via `extends` below.
include: '/.gitlab/ci_includes/jobs.yml'
# Pipeline stage order. Each stage may be listed only once (GitLab CI rejects a
# duplicate stage name; the original list repeated `test`). `build` must come
# before `test`: the test/lint jobs run inside the `ci-latest-*` docker images
# that the build-stage jobs produce and push.
stages:
- lint
- build
- test
- build-final
- publish
# NOTE(review): this job and the other bare `node:12`/`python:3.7` jobs below
# (web-frontend-stylelint, web-frontend-test, backend-flake8, backend-pytest)
# look like remnants of the pre-docker CI setup that the docker-based jobs
# further down replace — confirm whether they are still meant to run.
web-frontend-eslint:
stage: lint
image: node:12
script:
- cd web-frontend
- make install-dependencies
- make eslint
# Global pipeline variables. NOTE(review): indentation was lost in this view;
# this appears to be the top-level `variables:` block, not part of the job
# above — confirm nesting against the repository file.
variables:
ENABLE_JOB_SKIPPING:
value: "true"
description: "If set to false then tests and lints will be forced to run and not use previously cached results."
ENABLE_COVERAGE:
value: "true"
description: "If set to false then tests will not generate coverage or testing reports used by gitlab to show nicer MRs."
# An image repo which is used for storing and passing images between ci pipeline jobs
# and also speeding up ci builds by caching from the latest ci image when building.
CI_IMAGE_REPO: $CI_REGISTRY_IMAGE/ci
# Any images with tags prefixed with the two variables below will be cleaned up automatically
# by our gitlab cleanup job:
# (https://gitlab.com/bramw/baserow/-/settings/packages_and_registries).
#
# ## Note:
# These cleanup tag prefixes are needed as gitlab only supports cleanup by defining
# a regex that matches tags, so we can't do cleanup differently based on image name
# or repo...
#
# IMPORTANT: UPDATE GITLAB CONTAINER REPO CLEANUP JOB REGEX IF YOU CHANGE THIS
CLEANUP_JOB_CI_TAG_PREFIX: ci-latest-
# IMPORTANT: UPDATE GITLAB CONTAINER REPO CLEANUP JOB REGEX IF YOU CHANGE THIS
TESTED_IMAGE_PREFIX: ci-tested-
# An image repo where dev and normal images will be released to for public usage after
# they have been successfully built and tested.
RELEASE_IMAGE_REPO: $CI_REGISTRY_IMAGE/testing
# Image names (dev images carry the build tooling; non-dev are the release images).
BACKEND_IMAGE_NAME: backend
BACKEND_DEV_IMAGE_NAME: backend_dev
WEBFRONTEND_IMAGE_NAME: web-frontend
WEBFRONTEND_DEV_IMAGE_NAME: web-frontend_dev
# Per-commit dev image paths used to pass images between pipeline stages.
BACKEND_CI_DEV_IMAGE: $CI_IMAGE_REPO/$BACKEND_DEV_IMAGE_NAME:$CLEANUP_JOB_CI_TAG_PREFIX$CI_COMMIT_SHORT_SHA
WEBFRONTEND_CI_DEV_IMAGE: $CI_IMAGE_REPO/$WEBFRONTEND_DEV_IMAGE_NAME:$CLEANUP_JOB_CI_TAG_PREFIX$CI_COMMIT_SHORT_SHA
# Once images are tested they will publish under these names to ensure that any
# tag only runs of the pipeline can never publish untested images.
TESTED_BACKEND_CI_IMAGE: $CI_IMAGE_REPO/$BACKEND_IMAGE_NAME:$TESTED_IMAGE_PREFIX$CI_COMMIT_SHORT_SHA
TESTED_WEBFRONTEND_CI_IMAGE: $CI_IMAGE_REPO/$WEBFRONTEND_IMAGE_NAME:$TESTED_IMAGE_PREFIX$CI_COMMIT_SHORT_SHA
TESTED_BACKEND_CI_DEV_IMAGE: $CI_IMAGE_REPO/$BACKEND_DEV_IMAGE_NAME:$TESTED_IMAGE_PREFIX$CI_COMMIT_SHORT_SHA
TESTED_WEBFRONTEND_CI_DEV_IMAGE: $CI_IMAGE_REPO/$WEBFRONTEND_DEV_IMAGE_NAME:$TESTED_IMAGE_PREFIX$CI_COMMIT_SHORT_SHA
# Used to tag the latest images on $DEVELOP_BRANCH_NAME
DEVELOP_LATEST_TAG: develop-latest
# Names of important branches used to decide when to run certain jobs.
MASTER_BRANCH_NAME: master
DEVELOP_BRANCH_NAME: develop
# The locations of the various dockerfiles to build.
BACKEND_DOCKERFILE_PATH: $CI_PROJECT_DIR/backend/Dockerfile
WEBFRONTEND_DOCKERFILE_PATH: $CI_PROJECT_DIR/web-frontend/Dockerfile
# The image path for the helper CI util image that will be built and pushed to.
CI_UTIL_IMAGE: $CI_IMAGE_REPO/ci_util_image:latest
web-frontend-stylelint:
stage: lint
image: node:12
script:
- cd web-frontend
- make install-dependencies
- make stylelint
# NOTE(review): duplicate top-level key — another `web-frontend-test` job is
# defined later in this file. YAML parsers keep only the last definition
# (duplicate keys are invalid per spec); confirm which one is intended.
web-frontend-test:
stage: test
image: node:12
script:
- cd web-frontend
- make install-dependencies
- make test
backend-flake8:
stage: lint
image: python:3.7
script:
- cd backend
- make install-dependencies
- make install-dev-dependencies
- make lint
# NOTE(review): this job has no script and appears truncated — likely a stale
# fragment of the removed pre-docker test job.
backend-pytest:
stage: test
image: python:3.7
# Builds and pushes the helper CI utility image ($CI_UTIL_IMAGE) used by the
# backend-coverage and final-image jobs. Only runs when files under
# .gitlab/ci_util_image change.
build-ci-util-image:
image: docker:20.10.12
stage: build
services:
- docker:20.10.12-dind
variables:
# Use BuildKit and talk to the dind service over TLS.
DOCKER_BUILDKIT: 1
DOCKER_HOST: tcp://docker:2376
DOCKER_TLS_CERTDIR: "/certs"
before_script:
# Log in to the GitLab container registry so we can push the image.
- |
echo "$CI_REGISTRY_PASSWORD" | \
docker login -u "$CI_REGISTRY_USER" "$CI_REGISTRY" --password-stdin
script:
- cd .gitlab/ci_util_image
- docker build -t $CI_UTIL_IMAGE .
- docker push $CI_UTIL_IMAGE
only:
changes:
- .gitlab/ci_util_image/*
# If pipeline not triggered by tag :
# - Builds the backend dev image and stores in ci repo for next stages.
# (.build-baserow-image is defined in the included /.gitlab/ci_includes/jobs.yml)
build-backend-image:
extends: .build-baserow-image
variables:
DEV_IMAGE_NAME: $BACKEND_DEV_IMAGE_NAME
DOCKERFILE_PATH: $BACKEND_DOCKERFILE_PATH
# If pipeline not triggered by tag and backend code has changed:
# - Runs the backend lint
# (.docker-image-test-stage comes from the included jobs.yml; it runs against the
# dev image pushed by build-backend-image.)
backend-lint:
extends:
- .docker-image-test-stage
script:
- docker run --rm $BACKEND_CI_DEV_IMAGE lint
only:
# Skip linting if no change to files
changes:
- backend/**/*
- premium/backend/**/*
# If pipeline not triggered by tag and backend code has not changed:
# - If there is a previous successful backend lint run in the cache then skip.
# - Otherwise runs backend lint.
no-backend-changes-so-try-skip-lint:
extends:
- .skippable-job
- backend-lint
variables:
SKIP_JOB_NAME: backend-lint
# Override inherited only block, so we can run this job in the
# exact opposite situations.
only: null
except:
changes:
- backend/**/*
- premium/backend/**/*
# If pipeline not triggered by tag and backend code has changed:
# - Runs the backend tests (the first 1/3)
# - Generates coverage db's and stores as artifact for later coverage merge and report
backend-test-group-1:
extends:
- .docker-image-test-stage
services:
# dind to run the test container, plus postgres and mjml which the backend
# tests reach via the `db` and `mjml` host aliases.
- docker:20.10.12-dind
- name: postgres:11.3
alias: db
- name: liminspace/mjml-tcpserver:0.10
alias: mjml
variables:
DOCKER_HOST: tcp://docker:2376
DOCKER_TLS_CERTDIR: "/certs"
POSTGRES_USER: baserow
POSTGRES_PASSWORD: baserow
POSTGRES_DB: baserow
PYTEST_SPLIT_GROUP: 1
script:
# NOTE(review): the five `cd backend`/`make ...` lines below duplicate the old
# non-docker test job and conflict with the docker-based commands that follow —
# likely stale diff residue; confirm against the repository file.
- cd backend
- make install-dependencies
- make install-dev-dependencies
- export PYTHONPATH=$CI_PROJECT_DIR/backend/src:$CI_PROJECT_DIR/premium/backend/src
- make test
# Resolve the service-alias IPs so they can be forwarded into the test container
# via --add-host (the container shares no network with the job's services).
- MJML_IP=$(cat /etc/hosts | awk '{if ($2 == "mjml") print $1;}')
- ping -w 2 $MJML_IP
- DB_IP=$(cat /etc/hosts | awk '{if ($2 == "db") print $1;}')
- ping -w 2 $DB_IP
- mkdir -p reports
# `ci-test` also produces coverage/junit reports; `test` is the fast variant.
- TEST_TYPE=$([[ "$ENABLE_COVERAGE" = "true" ]] && echo "ci-test" || echo "test")
- |
docker run \
-e PYTEST_SPLITS=3 \
-e PYTEST_SPLIT_GROUP=$PYTEST_SPLIT_GROUP \
--name=baserow_backend_test_container \
--add-host="db:$DB_IP" \
--add-host="mjml:$MJML_IP" \
$BACKEND_CI_DEV_IMAGE $TEST_TYPE;
# Copy the reports out of the (stopped) container before removing it.
- docker cp baserow_backend_test_container:/baserow/backend/reports .
- docker rm baserow_backend_test_container
# Only group 1 additionally smoke-tests that the backend starts up cleanly.
- |
if [[ $PYTEST_SPLIT_GROUP = 1 ]]; then
docker run -e DATABASE_USER=baserow \
-e DATABASE_NAME=baserow \
-e DATABASE_HOST=db \
-e DATABASE_PASSWORD=baserow \
--rm \
--add-host="db:$DB_IP" \
--add-host="mjml:$MJML_IP" \
$BACKEND_CI_DEV_IMAGE ci-check-startup;
fi
only:
# Skip testing on if no change to backend files
changes:
- backend/**/*
- premium/backend/**/*
artifacts:
paths:
- reports/
reports:
junit: reports/report.xml
# NOTE(review): stale fragment — `backend-setup` has no script and looks like a
# remnant of a removed job from the old CI configuration; confirm and delete.
backend-setup:
stage: build
image: python:3.7
# If pipeline not triggered by tag and backend code has not changed:
# - If there is a previous successful backend test run then download and reuse its
# artifacts (coverage etc).
# - Otherwise runs the backend-test job like normal.
no-backend-changes-so-try-skip-tests-group-1:
extends:
- backend-test-group-1
- .skippable-job
variables:
SKIP_JOB_NAME: backend-test-group-1
DOWNLOAD_AND_UNPACK_ARTIFACTS_ON_SKIP: 'true'
# Override inherited only block, so we can run this job in the
# exact opposite situations.
only: null
except:
changes:
- backend/**/*
- premium/backend/**/*
# Create 2 more separate groups to parallelize pytest by using separate groups to
# decrease overall build time. Pytest xdist doesn't help as the gitlab saas runners only
# have a single virtual core so `pytest -n 2+` slows things down.
backend-test-group-2:
extends: backend-test-group-1
variables:
PYTEST_SPLIT_GROUP: 2
no-backend-changes-so-try-skip-tests-group-2:
extends: no-backend-changes-so-try-skip-tests-group-1
variables:
SKIP_JOB_NAME: backend-test-group-2
PYTEST_SPLIT_GROUP: 2
backend-test-group-3:
extends: backend-test-group-1
variables:
PYTEST_SPLIT_GROUP: 3
no-backend-changes-so-try-skip-tests-group-3:
extends: no-backend-changes-so-try-skip-tests-group-1
variables:
SKIP_JOB_NAME: backend-test-group-3
PYTEST_SPLIT_GROUP: 3
# Collects together all the separate backend coverage databases from previous jobs and
# combines them to generate a single report for gitlab to use. Gitlab itself does not
# correctly merge these if you just add them all separately into artifacts->reports->
# cobertura.
backend-coverage:
image: $CI_UTIL_IMAGE
stage: build-final
interruptible: true
only:
variables:
- $ENABLE_COVERAGE == "true"
# Prevent rebuilds when tagging as all we want to do is tag and push
except:
variables:
- $CI_COMMIT_TAG
# Depend on the `reports` artifacts from the previous jobs
dependencies:
- backend-test-group-1
- backend-test-group-2
- backend-test-group-3
# If the actual tests were skipped then the artifacts will be on these jobs instead
- no-backend-changes-so-try-skip-tests-group-1
- no-backend-changes-so-try-skip-tests-group-2
- no-backend-changes-so-try-skip-tests-group-3
script:
# NOTE(review): the install/import/gunicorn lines below look like another job's
# script merged into this view by the lost diff markers — the coverage work
# starts at the venv activation; confirm against the repository file.
- pip install -e ./backend
- python -c 'import baserow'
- pip install -e ./premium/backend
- python -c 'import baserow_premium'
- export DJANGO_SETTINGS_MODULE='baserow.config.settings.base'
- timeout --preserve-status 10s gunicorn --workers=1 -b 0.0.0.0:8000 -k uvicorn.workers.UvicornWorker baserow.config.asgi:application
- . /baserow/venv/bin/activate
# The reports artifacts will be extracted before the script runs into reports by
# gitlab
- cp reports/.coverage.* .
- export COVERAGE_RCFILE=backend/.coveragerc
- coverage combine
- coverage report
- coverage xml -o coverage.xml
artifacts:
reports:
cobertura: coverage.xml
coverage: '/^TOTAL.+?(\d+\%)$/'
# If pipeline not triggered by tag:
# - Build and store non-dev images in CI repo under the `ci-tested` tag so we know
# those images have passed the tests.
# (.build-final-baserow-image is defined in the included jobs.yml)
build-final-backend-image:
extends: .build-final-baserow-image
variables:
IMAGE_NAME: $BACKEND_IMAGE_NAME
DEV_IMAGE_NAME: $BACKEND_DEV_IMAGE_NAME
DOCKERFILE_PATH: $BACKEND_DOCKERFILE_PATH
# ==================================== WEB-FRONTEND ====================================
# If pipeline not triggered by tag:
# - Builds the web-frontend dev image and stores in ci repo for next stages.
build-web-frontend-image:
extends: .build-baserow-image
variables:
DEV_IMAGE_NAME: $WEBFRONTEND_DEV_IMAGE_NAME
DOCKERFILE_PATH: $WEBFRONTEND_DOCKERFILE_PATH
# If pipeline not triggered by tag and web-frontend code has changed:
# - Runs eslint and stylelint
# - Stores a web-frontend_lint_success file in the cache so future pipelines can skip
# if no file changes.
web-frontend-lint:
extends:
- .docker-image-test-stage
script:
- docker run --rm $WEBFRONTEND_CI_DEV_IMAGE lint
only:
changes:
- web-frontend/**/*
- premium/web-frontend/**/*
# If pipeline not triggered by tag and web-frontend code has not changed:
# - If there is a previous successful lint run in the cache then skip.
# - otherwise runs lint and stores success file in cache if successful.
no-web-frontend-changes-so-try-skip-lint:
extends:
- web-frontend-lint
- .skippable-job
variables:
SKIP_JOB_NAME: web-frontend-lint
# Override inherited only block so we can run this job in the
# exact opposite situations.
only: null
except:
changes:
- web-frontend/**/*
- premium/web-frontend/**/*
# If pipeline not triggered by tag and web-frontend code has changed:
# - Runs the web-frontend tests
# - Generates coverage and testing reports
# - Stores the reports in the cache if successful
# NOTE(review): a `web-frontend-test` job with the same name also appears earlier
# in this file; duplicate top-level keys are invalid YAML and most parsers keep
# only this later definition — confirm the earlier one is stale and remove it.
web-frontend-test:
extends:
- .docker-image-test-stage
script:
- mkdir reports/ -p
- TEST_TYPE=$([[ "$ENABLE_COVERAGE" = "true" ]] && echo "ci-test" || echo "test")
# `tee` keeps the test output visible in the job log while also archiving it.
- |
docker run --name=webfrontend_test $WEBFRONTEND_CI_DEV_IMAGE $TEST_TYPE \
| tee reports/stdout.txt;
- docker cp webfrontend_test:/baserow/reports .
- docker rm webfrontend_test
only:
# Skip testing on if no change to web-frontend files
changes:
- web-frontend/**/*
- premium/web-frontend/**/*
artifacts:
paths:
- reports/
reports:
cobertura: reports/coverage/cobertura-coverage.xml
junit: reports/junit.xml
coverage: '/Lines\s*:\s*(\d+.?\d*)%/'
# If pipeline not triggered by tag and web-frontend code has not changed:
# - If there is a previous successful webfrontend test run in the cache then skip and
# unpack its coverage reports.
# - Otherwise runs the tests, coverage reporting and stores in cache if successful
# as normal
no-web-frontend-changes-so-try-skip-tests:
extends:
- web-frontend-test
- .skippable-job
variables:
SKIP_JOB_NAME: web-frontend-test
DOWNLOAD_AND_UNPACK_ARTIFACTS_ON_SKIP: 'true'
# Override inherited only block so we can run this job in the
# exact opposite situations.
only: null
except:
changes:
- web-frontend/**/*
- premium/web-frontend/**/*
# If pipeline not triggered by tag:
# - Build and store non-dev images in CI repo under the `ci-tested` tag so we know
# those images have passed the tests.
build-final-web-frontend-image:
extends: .build-final-baserow-image
variables:
IMAGE_NAME: $WEBFRONTEND_IMAGE_NAME
DEV_IMAGE_NAME: $WEBFRONTEND_DEV_IMAGE_NAME
DOCKERFILE_PATH: $WEBFRONTEND_DOCKERFILE_PATH
# ================================== TRIGGER SAAS =====================================
# Triggers a special pipeline in dependant project and passes various variables to it.
# Only on master and develop.
trigger-saas-build:
stage: publish
# Forward only the listed variables to the downstream pipeline.
inherit:
variables:
- CI_COMMIT_BRANCH
- TESTED_BACKEND_CI_IMAGE
- TESTED_WEBFRONTEND_CI_IMAGE
- CI_COMMIT_SHA
- CI_COMMIT_SHORT_SHA
- DEVELOP_BRANCH_NAME
- MASTER_BRANCH_NAME
# UPSTREAM_* aliases let the downstream project refer to this pipeline's commit
# and tested images without clashing with its own CI_* variables.
variables:
UPSTREAM_SHA: $CI_COMMIT_SHA
UPSTREAM_SHORT_SHA: $CI_COMMIT_SHORT_SHA
UPSTREAM_TESTED_BACKEND_CI_IMAGE: $TESTED_BACKEND_CI_IMAGE
UPSTREAM_TESTED_WEBFRONTEND_CI_IMAGE: $TESTED_WEBFRONTEND_CI_IMAGE
only:
changes:
- web-frontend/**/*
- premium/web-frontend/**/*
- backend/**/*
- premium/backend/**/*
variables:
- ($CI_COMMIT_BRANCH == $DEVELOP_BRANCH_NAME || $CI_COMMIT_BRANCH == $MASTER_BRANCH_NAME)
# A broken downstream pipeline should not fail this project's pipeline.
allow_failure: true
trigger:
project: bramw/baserow-saas
branch: $CI_COMMIT_BRANCH
# ================================== PUSHING BACKEND ==================================
# NOTE(review): the comments below said `develop_latest` but $DEVELOP_LATEST_TAG is
# `develop-latest`; also $RELEASE_IMAGE_REPO currently points at
# $CI_REGISTRY_IMAGE/testing rather than Dockerhub — confirm intent.
# Push baserow/backend:develop-latest
publish-backend-develop-latest-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_BRANCH == $DEVELOP_BRANCH_NAME
variables:
SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH: $DEVELOP_BRANCH_NAME
SOURCE_IMAGE: $TESTED_BACKEND_CI_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$BACKEND_IMAGE_NAME:$DEVELOP_LATEST_TAG"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# Push baserow/backend_dev:develop-latest
publish-backend-develop-latest-dev-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_BRANCH == $DEVELOP_BRANCH_NAME
variables:
SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH: $DEVELOP_BRANCH_NAME
SOURCE_IMAGE: $TESTED_BACKEND_CI_DEV_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$BACKEND_DEV_IMAGE_NAME:$DEVELOP_LATEST_TAG"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# Push baserow/backend:$VERSION_GIT_TAG
publish-backend-release-tagged-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_TAG
variables:
SKIP_IF_TAG_NOT_ON_BRANCH: $MASTER_BRANCH_NAME
SOURCE_IMAGE: $TESTED_BACKEND_CI_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$BACKEND_IMAGE_NAME:$CI_COMMIT_TAG"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# Push baserow/backend:latest
# Only when the tag points at the newest commit on master, so re-tagging an old
# release cannot clobber `latest`.
publish-backend-latest-release-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_TAG
variables:
SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH: $MASTER_BRANCH_NAME
SKIP_IF_TAG_NOT_ON_BRANCH: $MASTER_BRANCH_NAME
SOURCE_IMAGE: $TESTED_BACKEND_CI_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$BACKEND_IMAGE_NAME:latest"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# ================================ PUSHING WEB-FRONTEND ===============================
# Push baserow/web-frontend:develop-latest
publish-webfrontend-develop-latest-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_BRANCH == $DEVELOP_BRANCH_NAME
variables:
SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH: $DEVELOP_BRANCH_NAME
SOURCE_IMAGE: $TESTED_WEBFRONTEND_CI_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$WEBFRONTEND_IMAGE_NAME:$DEVELOP_LATEST_TAG"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# Push baserow/web-frontend_dev:develop-latest
publish-webfrontend-develop-latest-dev-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_BRANCH == $DEVELOP_BRANCH_NAME
variables:
SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH: $DEVELOP_BRANCH_NAME
SOURCE_IMAGE: $TESTED_WEBFRONTEND_CI_DEV_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$WEBFRONTEND_DEV_IMAGE_NAME:$DEVELOP_LATEST_TAG"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# Push baserow/web-frontend:$VERSION_GIT_TAG
publish-webfrontend-release-tagged-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_TAG
variables:
SKIP_IF_TAG_NOT_ON_BRANCH: $MASTER_BRANCH_NAME
SOURCE_IMAGE: $TESTED_WEBFRONTEND_CI_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$WEBFRONTEND_IMAGE_NAME:$CI_COMMIT_TAG"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER
# Push baserow/web-frontend:latest
publish-webfrontend-latest-release-image:
extends: .publish-baserow-image
only:
variables:
- $CI_COMMIT_TAG
variables:
SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH: $MASTER_BRANCH_NAME
SKIP_IF_TAG_NOT_ON_BRANCH: $MASTER_BRANCH_NAME
SOURCE_IMAGE: $TESTED_WEBFRONTEND_CI_IMAGE
TARGET_IMAGE: "$RELEASE_IMAGE_REPO/$WEBFRONTEND_IMAGE_NAME:latest"
TARGET_REGISTRY: $CI_REGISTRY
TARGET_REGISTRY_PASSWORD: $CI_REGISTRY_PASSWORD
TARGET_REGISTRY_USER: $CI_REGISTRY_USER

View file

@ -0,0 +1,409 @@
# ============== "Abstract" ci stages used by real stages =======================
# Builds a dev version of a specific Dockerfile (--target dev) using a previous CI
# image or the latest develop image as a cache to speed up the build. Tags and pushes
# the resulting dev image for later stages in the pipeline to use.
#
# To extend this stage set the DOCKERFILE_PATH and DEV_IMAGE_NAME variables.
.build-baserow-image:
image: docker:20.10.12
stage: build
interruptible: true
# Prevent rebuilds when tagging as all we want to do is tag and push the already built image
# (also excluded from multi-project/`pipelines`-triggered runs).
except:
refs:
- pipelines
variables:
- $CI_COMMIT_TAG
services:
- docker:20.10.12-dind
variables:
DOCKER_BUILDKIT: 1
DOCKER_HOST: tcp://docker:2376
DOCKER_TLS_CERTDIR: "/certs"
# OCI/Gitlab provenance labels baked into every built image.
IMAGE_LABELS: >
--label org.opencontainers.image.vendor=$CI_PROJECT_URL
--label org.opencontainers.image.authors=$CI_PROJECT_URL
--label org.opencontainers.image.revision=$CI_COMMIT_SHA
--label org.opencontainers.image.source=$CI_PROJECT_URL
--label org.opencontainers.image.documentation=$CI_PROJECT_URL
--label org.opencontainers.image.licenses=$CI_PROJECT_URL
--label org.opencontainers.image.url=$CI_PROJECT_URL
--label vcs-url=$CI_PROJECT_URL
--label com.gitlab.ci.user=$CI_SERVER_URL/$GITLAB_USER_LOGIN
--label com.gitlab.ci.email=$GITLAB_USER_EMAIL
--label com.gitlab.ci.tagorbranch=$CI_COMMIT_REF_NAME
--label com.gitlab.ci.pipelineurl=$CI_PIPELINE_URL
--label com.gitlab.ci.commiturl=$CI_PROJECT_URL/commit/$CI_COMMIT_SHA
--label com.gitlab.ci.cijoburl=$CI_JOB_URL
--label com.gitlab.ci.mrurl=$CI_PROJECT_URL/-/merge_requests/$CI_MERGE_REQUEST_ID
--label org.opencontainers.image.ref.name=$CI_IMAGE_REPO:$CI_COMMIT_REF_NAME
# NOTE(review): the `echo "Must provide ..." 2>&1` lines below likely intend
# `>&2` (write the error to stderr); as written the redirection is a no-op on
# echo's output — confirm before changing.
script:
- |
echo "$CI_REGISTRY_PASSWORD" | \
docker login -u "$CI_REGISTRY_USER" "$CI_REGISTRY" --password-stdin
if [[ -z "$DOCKERFILE_PATH" ]]; then
echo "Must provide DOCKERFILE_PATH as a job variable" 2>&1
exit 1
fi
if [[ -z "$DEV_IMAGE_NAME" ]]; then
echo "Must provide DEV_IMAGE_NAME as a job variable" 2>&1
exit 1
fi
# Try cache from this branches latest image, if not fall back to the latest
# develop image.
# Ensure we don't go over 128 char docker tag length limit
TRUNCATED_BRANCH_NAME=${CI_COMMIT_REF_NAME:0:100}
CI_DEV_LATEST_BRANCH_TAG=$CLEANUP_JOB_CI_TAG_PREFIX$TRUNCATED_BRANCH_NAME
LATEST_CI_IMAGE="$CI_IMAGE_REPO/$DEV_IMAGE_NAME:$CI_DEV_LATEST_BRANCH_TAG"
# ===== 1. Try pull an image we can use to cache the build with =====
# First try the latest CI image for this branch
CACHE_IMAGE=$LATEST_CI_IMAGE
if ! docker pull $CACHE_IMAGE; then
# If that didnt work try the latest dev image from develop
CACHE_IMAGE="$RELEASE_IMAGE_REPO/$DEV_IMAGE_NAME:$DEVELOP_LATEST_TAG";
if ! docker pull $CACHE_IMAGE; then
CACHE_IMAGE=""
fi
fi
EXTRA_BUILD_ARGS=""
if [[ -n "$CACHE_IMAGE" ]]; then
echo "Caching docker build from $CACHE_IMAGE";
EXTRA_BUILD_ARGS="$EXTRA_BUILD_ARGS --cache-from $CACHE_IMAGE";
else
echo "Couldn't find image to cache build using"
fi
# This image tag is one that can be used by subsequent build steps, using the
# latest one might introduce race conditions with concurrent pipelines. Instead
# by using a simple name + sha we know we will be getting the right image later on
# and we can easily re-construct this image path also as $CI_COMMIT_SHORT_SHA is
# available in all stages.
CI_IMAGE_PATH=$CI_IMAGE_REPO/$DEV_IMAGE_NAME:$CLEANUP_JOB_CI_TAG_PREFIX$CI_COMMIT_SHORT_SHA
# ===== 2. Build a dev image to be used in subsequent CI stages =====
if [[ -n "$BUILD_FROM_IMAGE" ]]; then
EXTRA_BUILD_ARGS="$EXTRA_BUILD_ARGS --build-arg FROM_IMAGE=$BUILD_FROM_IMAGE";
echo "Building from $BUILD_FROM_IMAGE."
fi
# * Use `--build-arg BUILDKIT_INLINE_CACHE=1` to ensure this image's intermediate
# layers will be cached so builds caching from this image can use those layers.
# * $CACHE_ARG is a --cache-from if we have an existing image that we can use
# to speed up this build.
# * Target the dev image as we want to run tests and linting in this image.
# * Tag as both the ci image for use in later stages and the latest ci image to
# cache any future ci pipeline runs.
docker build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
$EXTRA_BUILD_ARGS \
$IMAGE_LABELS \
--target dev \
--tag $CI_IMAGE_PATH \
--tag $LATEST_CI_IMAGE \
-f $DOCKERFILE_PATH .;
# ===== 3. Push the CI image for the next stages and latest ci image cache =====
docker push $CI_IMAGE_PATH
docker push $LATEST_CI_IMAGE
# Builds a non-dev (no docker build target provided) and fully labelled final image
# and tags and pushes the non-dev and dev images using $TESTED_IMAGE_PREFIX to mark
# them as being successfully tested for the publishing jobs to use.
#
# To extend this stage set the DOCKERFILE_PATH, IMAGE_NAME and DEV_IMAGE_NAME variables.
# Template job: builds the final non-dev image (using the already-built CI dev
# image as a layer cache), applies OCI/provenance labels, and pushes both the
# non-dev and dev images tagged with $TESTED_IMAGE_PREFIX so the publish jobs
# can pick them up.
#
# To extend this stage set the DOCKERFILE_PATH, IMAGE_NAME and DEV_IMAGE_NAME variables.
.build-final-baserow-image:
  image: $CI_UTIL_IMAGE
  stage: build-final
  interruptible: true
  # Prevent rebuilds when tagging as all we want to do is tag and push.
  except:
    refs:
      - pipelines
    variables:
      - $CI_COMMIT_TAG
  services:
    - docker:20.10.12-dind
  variables:
    DOCKER_BUILDKIT: 1
    DOCKER_HOST: tcp://docker:2376
    DOCKER_TLS_CERTDIR: "/certs"
    # OCI and gitlab provenance labels applied to every image built by this job.
    IMAGE_LABELS: >
      --label org.opencontainers.image.vendor=$CI_PROJECT_URL
      --label org.opencontainers.image.authors=$CI_PROJECT_URL
      --label org.opencontainers.image.revision=$CI_COMMIT_SHA
      --label org.opencontainers.image.source=$CI_PROJECT_URL
      --label org.opencontainers.image.documentation=$CI_PROJECT_URL
      --label org.opencontainers.image.licenses=$CI_PROJECT_URL
      --label org.opencontainers.image.url=$CI_PROJECT_URL
      --label vcs-url=$CI_PROJECT_URL
      --label com.gitlab.ci.user=$CI_SERVER_URL/$GITLAB_USER_LOGIN
      --label com.gitlab.ci.email=$GITLAB_USER_EMAIL
      --label com.gitlab.ci.tagorbranch=$CI_COMMIT_REF_NAME
      --label com.gitlab.ci.pipelineurl=$CI_PIPELINE_URL
      --label com.gitlab.ci.commiturl=$CI_PROJECT_URL/commit/$CI_COMMIT_SHA
      --label com.gitlab.ci.cijoburl=$CI_JOB_URL
      --label com.gitlab.ci.mrurl=$CI_PROJECT_URL/-/merge_requests/$CI_MERGE_REQUEST_ID
      --label org.opencontainers.image.ref.name=$RELEASE_IMAGE_REPO:$CI_COMMIT_REF_NAME
  script:
    - |
      echo "$CI_REGISTRY_PASSWORD" | \
        docker login -u "$CI_REGISTRY_USER" "$CI_REGISTRY" --password-stdin
      # Fail fast with the error message on stderr (the original `2>&1` sent
      # these messages to stdout instead of stderr).
      if [[ -z "$DOCKERFILE_PATH" ]]; then
        echo "Must provide DOCKERFILE_PATH as a job variable" >&2
        exit 1
      fi
      if [[ -z "$IMAGE_NAME" ]]; then
        echo "Must provide IMAGE_NAME as a job variable" >&2
        exit 1
      fi
      if [[ -z "$DEV_IMAGE_NAME" ]]; then
        echo "Must provide DEV_IMAGE_NAME as a job variable" >&2
        exit 1
      fi

      # ===== 1. Setup image metadata labels =====

      # Build date for opencontainers (rfc 3339 date).
      BUILDDATE="'$(date '+%FT%T%z' | sed -E -n 's/(\+[0-9]{2})([0-9]{2})$/\1:\2/p')'"
      IMAGE_LABELS="$IMAGE_LABELS --label org.opencontainers.image.created=$BUILDDATE"
      IMAGE_LABELS="$IMAGE_LABELS --label build-date=$BUILDDATE"

      # Description for opencontainers.
      BUILDTITLE="$(echo $CI_PROJECT_TITLE | tr " " "_")_$IMAGE_NAME"
      IMAGE_LABELS="$IMAGE_LABELS --label org.opencontainers.image.title=$BUILDTITLE"
      IMAGE_LABELS="$IMAGE_LABELS --label org.opencontainers.image.description=$BUILDTITLE"

      # ==== 2. Tag, build and push non-dev image ====

      # Cache from the CI dev image to build the non dev image.
      CI_IMAGE_PATH=$CI_IMAGE_REPO/$DEV_IMAGE_NAME:$CLEANUP_JOB_CI_TAG_PREFIX$CI_COMMIT_SHORT_SHA
      TRUNCATED_BRANCH_NAME=${CI_COMMIT_REF_NAME:0:100}
      NON_DEV_CACHE_IMAGE=$CI_IMAGE_REPO/$IMAGE_NAME:$CLEANUP_JOB_CI_TAG_PREFIX$TRUNCATED_BRANCH_NAME
      TARGET_NON_DEV_IMAGE_PATH=$CI_IMAGE_REPO/$IMAGE_NAME:$TESTED_IMAGE_PREFIX$CI_COMMIT_SHORT_SHA
      TARGET_DEV_IMAGE_PATH=$CI_IMAGE_REPO/$DEV_IMAGE_NAME:$TESTED_IMAGE_PREFIX$CI_COMMIT_SHORT_SHA

      docker pull $CI_IMAGE_PATH
      if ! docker pull $NON_DEV_CACHE_IMAGE ; then
        echo "Failed to find non dev cache image $NON_DEV_CACHE_IMAGE..."
        EXTRA_BUILD_ARGS="";
      else
        echo "Caching from $NON_DEV_CACHE_IMAGE";
        EXTRA_BUILD_ARGS="--cache-from $NON_DEV_CACHE_IMAGE";
      fi

      if [[ -n "$BUILD_FROM_IMAGE" ]]; then
        EXTRA_BUILD_ARGS="$EXTRA_BUILD_ARGS --build-arg FROM_IMAGE=$BUILD_FROM_IMAGE";
        IMAGE_LABELS="$IMAGE_LABELS --label built-from-image=$BUILD_FROM_IMAGE"
        if docker pull "$BUILD_FROM_IMAGE"; then
          # Copy the provenance labels off the base image onto this one so a
          # published image can always be traced back to what it was built from.
          BUILT_FROM_REVISION=$(docker inspect $BUILD_FROM_IMAGE | jq -r '.[0].Config.Labels["org.opencontainers.image.revision"]')
          BUILT_FROM_COMMITURL=$(docker inspect $BUILD_FROM_IMAGE | jq -r '.[0].Config.Labels["com.gitlab.ci.commiturl"]')
          BUILT_FROM_CIJOBURL=$(docker inspect $BUILD_FROM_IMAGE | jq -r '.[0].Config.Labels["com.gitlab.ci.cijoburl"]')
          BUILT_FROM_MRURL=$(docker inspect $BUILD_FROM_IMAGE | jq -r '.[0].Config.Labels["com.gitlab.ci.mrurl"]')
          BUILT_FROM_VCSURL=$(docker inspect $BUILD_FROM_IMAGE | jq -r '.[0].Config.Labels["vcs-url"]')
          IMAGE_LABELS="$IMAGE_LABELS --label built-from-revision=$BUILT_FROM_REVISION"
          IMAGE_LABELS="$IMAGE_LABELS --label built-from-commiturl=$BUILT_FROM_COMMITURL"
          IMAGE_LABELS="$IMAGE_LABELS --label built-from-cijoburl=$BUILT_FROM_CIJOBURL"
          IMAGE_LABELS="$IMAGE_LABELS --label built-from-mrurl=$BUILT_FROM_MRURL"
          IMAGE_LABELS="$IMAGE_LABELS --label built-from-vcsurl=$BUILT_FROM_VCSURL"
        else
          echo "Failed to pull build from image $BUILD_FROM_IMAGE, something has gone wrong" >&2
          exit 1
        fi
      fi

      # Build the normal non-dev image with all the tags and labels.
      # NOTE(review): $FORMATTEDTAGLIST is not set anywhere in this job, so it
      # presumably expands empty or is set globally elsewhere — confirm.
      docker build \
        --cache-from $CI_IMAGE_PATH \
        $EXTRA_BUILD_ARGS \
        $FORMATTEDTAGLIST \
        $IMAGE_LABELS \
        -t $TARGET_NON_DEV_IMAGE_PATH \
        -f $DOCKERFILE_PATH .;
      docker push $TARGET_NON_DEV_IMAGE_PATH

      # Build the cache image with layer caching enabled. We don't enable it for
      # the image above to reduce its size.
      docker build \
        --cache-from $CI_IMAGE_PATH \
        $EXTRA_BUILD_ARGS \
        $IMAGE_LABELS \
        --build-arg BUILDKIT_INLINE_CACHE=1 \
        -t $NON_DEV_CACHE_IMAGE \
        -f $DOCKERFILE_PATH .;
      docker push $NON_DEV_CACHE_IMAGE

      # The dev image was already built earlier in the pipeline; just retag it
      # as tested and push.
      docker tag $CI_IMAGE_PATH $TARGET_DEV_IMAGE_PATH
      docker push $TARGET_DEV_IMAGE_PATH
# Base for simple docker-driven test jobs. Does not run for TAG pipelines
# (where all we do is retag and push already-tested images) and does not
# check out git.
.docker-image-test-stage:
  image: $CI_UTIL_IMAGE
  stage: test
  interruptible: true
  # Skip on tag pipelines: tagging only republishes previously tested images.
  except:
    refs:
      - pipelines
    variables:
      - $CI_COMMIT_TAG
  services:
    - docker:20.10.12-dind
# Pushes $SOURCE_IMAGE to $TARGET_IMAGE using the $TARGET_REGISTRY_PASSWORD,
# $TARGET_REGISTRY_USER and $TARGET_REGISTRY credentials.
#
# Set $SKIP_IF_TAG_NOT_ON_BRANCH to make the job skip if the commit is not on
# the specified branch. Useful for TAG pipelines when $CI_COMMIT_BRANCH is not set
# and so we need to do some extra git work to figure out what branches this commit is
# on.
#
# Set $SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH to a branch name. If the job is not
# for a commit which is the latest on the specified branch name (for example due to
# someone re-running a pipeline for an old commit) this job will be skipped.
.publish-baserow-image:
  image: $CI_UTIL_IMAGE
  stage: publish
  services:
    - docker:20.10.12-dind
  except:
    refs:
      - pipelines
  variables:
    DOCKER_HOST: tcp://docker:2376
    DOCKER_TLS_CERTDIR: "/certs"
  allow_failure:
    # By exiting with this code we can skip this step without failing the build,
    # but still fail if something else goes wrong.
    exit_codes: 137
  script:
    - |
      if [[ -n "$SKIP_IF_TAG_NOT_ON_BRANCH" ]]; then
        # Query for all the branches that this commit is part of.
        curl -s --header "JOB-TOKEN: $CI_JOB_TOKEN" \
          "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/repository/commits/$CI_COMMIT_SHA/refs?type=branch" \
          -o this_commits_branches.json;
        # Extract just the branch names from the json so we can assert it matches.
        TAG_BRANCH_NAMES=$(cat this_commits_branches.json | jq -r ".[].name")
        NUM_BRANCHES=$(cat this_commits_branches.json | jq length)
        # Ensure the commit is only on master and no other branches, otherwise someone
        # could checkout a master commit as a new branch and tag it to cause an image
        # upload. Error messages go to stderr (was `2>&1` which sent them to stdout).
        if [[ "$NUM_BRANCHES" != "1" || "$TAG_BRANCH_NAMES" != "$SKIP_IF_TAG_NOT_ON_BRANCH" ]]; then
          echo "Tags should never be applied to non $SKIP_IF_TAG_NOT_ON_BRANCH branches!" >&2;
          echo "Pipeline is running for tag: $CI_COMMIT_TAG which for a commit that only appears on $SKIP_IF_TAG_NOT_ON_BRANCH and no other branches." >&2;
          echo "Instead this commit appears on $NUM_BRANCHES branches called $TAG_BRANCH_NAMES" >&2;
          exit 1;
        fi
      fi
      if [[ -n "$SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH" ]]; then
        LATEST_COMMIT_HASH=$(git rev-parse origin/$SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH)
        HEAD_COMMIT_HASH=$CI_COMMIT_SHA
        if [[ "$LATEST_COMMIT_HASH" != "$HEAD_COMMIT_HASH" ]]; then
          echo "Pipeline is not running for latest commit on origin/$SKIP_IF_NOT_LATEST_COMMIT_ON_BRANCH";
          echo " which has commit $LATEST_COMMIT_HASH.";
          echo "Instead pipeline is running on commit $HEAD_COMMIT_HASH, exitting as configured to do so in this situation...";
          # 137 matches allow_failure.exit_codes above, so this counts as a skip.
          exit 137;
        fi
      fi
      echo "$TARGET_REGISTRY_PASSWORD" | docker login -u "$TARGET_REGISTRY_USER" "$TARGET_REGISTRY" --password-stdin
      if ! docker pull $SOURCE_IMAGE; then
        echo "Could not pull $SOURCE_IMAGE, has the build pipeline finished yet?" >&2;
        exit 1
      fi
      docker tag $SOURCE_IMAGE $TARGET_IMAGE
      docker push $TARGET_IMAGE
# Template letting a job be skipped when an equivalent job already succeeded
# for the same code. Requires SKIP_JOB_NAME. When ENABLE_JOB_SKIPPING is
# "true" it asks the gitlab API for a previous successful run of that job (or
# of this job itself) for this commit or its single parent; on a hit it exits
# 0 early, optionally first downloading and unpacking that run's artifacts
# when DOWNLOAD_AND_UNPACK_ARTIFACTS_ON_SKIP is set.
.skippable-job:
  before_script:
    - |
      if [[ -z "$SKIP_JOB_NAME" ]]; then
        echo "Must provide SKIP_JOB_NAME as a job variable" >&2
        exit 1
      fi
      if [[ "$ENABLE_JOB_SKIPPING" = "true" ]]; then
        # Look up previous successful runs of job $2 for commit $1 via the
        # gitlab API. On a usable hit this exits 0, skipping the rest of the job.
        try_download_latest_successful_artifacts_for_commit(){
          COMMIT_HASH=$1
          JOB_NAME=$2
          # Collapsed gitlab log section so the check doesn't clutter the job log.
          echo -e "\e[0Ksection_start:`date +%s`:$COMMIT_HASH$JOB_NAME[collapsed=true]\r\e[0KPrevious successful run check for $JOB_NAME and $COMMIT_HASH"
          URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/repository/commits/$COMMIT_HASH/statuses?name=$JOB_NAME"
          COMMIT_GITLAB_JOBS=$(curl --header "PRIVATE-TOKEN: $PROJECT_READ_ONLY_API_TOKEN" $URL)
          if [[ "$COMMIT_GITLAB_JOBS" ]]; then
            echo "Got these job statuses: $COMMIT_GITLAB_JOBS"
            JOB_ID=$(echo $COMMIT_GITLAB_JOBS | jq "[.[] | select(.status == \"success\")][0].id")
            # Check if JOB_ID is an integer (POSIX compliant way)
            if [ "$JOB_ID" -eq "$JOB_ID" ] 2> /dev/null; then
              if [[ -n "$DOWNLOAD_AND_UNPACK_ARTIFACTS_ON_SKIP" ]] ; then
                exit_code=0
                curl --fail --location --output artifacts.zip \
                  --header "PRIVATE-TOKEN: $PROJECT_READ_ONLY_API_TOKEN" \
                  "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/jobs/$JOB_ID/artifacts" \
                  || exit_code=$?;
                if [ ${exit_code} -ne 0 ]; then
                  echo "Failed to get artifacts from successful run $JOB_ID"
                else
                  unzip artifacts.zip || exit_code=$?
                  if [ ${exit_code} -ne 0 ]; then
                    echo "Failed to unzip artifacts"
                  else
                    # Replay the previous run's captured stdout so this job's
                    # log is still useful when skipped.
                    if [[ -f "reports/stdout.txt" ]]; then
                      cat reports/stdout.txt;
                    fi
                    echo "Skipping $JOB_NAME as previous successful run for $COMMIT_HASH and it's artifacts were found."
                    exit 0;
                  fi
                fi
              else
                echo "Skipping $JOB_NAME as previous successful build for $COMMIT_HASH were found.".
                exit 0;
              fi
            else
              echo "Failed to find successful run of $JOB_NAME in $COMMIT_GITLAB_JOBS"
            fi
          else
            echo "Failed to query gitlab for jobs";
          fi
          echo -e "\e[0Ksection_end:`date +%s`:$COMMIT_HASH$JOB_NAME\r\e[0K"
        }
        SECOND_PARENT_COMMIT=$(git rev-list -1 --merges ${CI_COMMIT_SHA}~1..${CI_COMMIT_SHA})
        if [[ -z "$SECOND_PARENT_COMMIT" ]] ; then
          # If there is no second parent commit then there is only one parent commit
          # and so we can safely look for its artifacts.
          PREVIOUS_COMMIT_SHA=$(git rev-parse HEAD~1)
          # Search for successful runs of either the normal job or this job itself
          # for either this or previous commit.
          try_download_latest_successful_artifacts_for_commit $CI_COMMIT_SHA $SKIP_JOB_NAME
          try_download_latest_successful_artifacts_for_commit $CI_COMMIT_SHA $CI_JOB_NAME
          try_download_latest_successful_artifacts_for_commit $PREVIOUS_COMMIT_SHA $SKIP_JOB_NAME
          try_download_latest_successful_artifacts_for_commit $PREVIOUS_COMMIT_SHA $CI_JOB_NAME
          echo "Actually running job as successful run for previous or this commit not found"
        else
          # There is a second (or more) parent commit meaning we should re-run this job
          # as a merge has happened.
          echo "Running full job as this is a merge commit."
        fi
      else
        echo "Force running job regardless of previous runs."
      fi

View file

@ -0,0 +1,9 @@
# A small helper image which has some useful tools pre-installed that are used by ci
# stages. By building our own little image it means every single ci job doesn't need
# to repeatedly re-install these tools when they run.
FROM docker:20.10.12

ENV PYTHONUNBUFFERED=1

# --no-cache already fetches a fresh package index, so --update is redundant.
RUN apk add --no-cache curl git jq python3 openssh-client \
    && ln -sf python3 /usr/bin/python

# Bootstrap pip and a virtualenv containing coverage in one layer to keep the
# image small (three separate RUNs each added a layer for related work).
RUN python3 -m ensurepip \
    && pip3 install --no-cache --upgrade pip setuptools \
    && mkdir /baserow \
    && python3 -m venv /baserow/venv \
    && . /baserow/venv/bin/activate \
    && pip3 install coverage

12
backend/.coveragerc Normal file
View file

@ -0,0 +1,12 @@
# coverage.py configuration used by the backend CI test jobs.
[run]
# Also required for gitlab MR coverage to be shown correctly.
relative_files = True
omit =
    */generated/*
# We can't set source as it changes the xml reports file paths to be relative from
# say `backend/src` instead of the root of the repo. Gitlab needs paths to be relative
# from the root so instead we just set include which ensures in gitlab MR coverage is
# shown correctly.
include =
    backend/src/**/*
    premium/backend/src/**/*

1140
backend/.test_durations Normal file

File diff suppressed because one or more lines are too long

View file

@ -1,4 +1,4 @@
FROM python:3.7-slim-buster
FROM python:3.7-slim-buster as base
ARG UID
ENV UID=${UID:-9999}
@ -19,6 +19,7 @@ RUN apt-get update && \
dos2unix \
tini \
postgresql-client \
gettext \
&& apt-get autoclean \
&& apt-get clean \
&& apt-get autoremove \
@ -26,13 +27,27 @@ RUN apt-get update && \
USER $UID:$GID
COPY --chown=$UID:$GID ./backend/requirements/base.txt /baserow/requirements/
# Disable the path warn as we set the correct path in the entrypoint when it is easy
# to know the users $HOME/.local/bin location. Changing path in the docker image does
# not work as we do not know where $HOME when using an ENV command.
RUN pip3 install --no-warn-script-location -r /baserow/requirements/base.txt
# In slim docker images, mime.types is removed and we need it for mimetypes guessing
COPY --chown=$UID:$GID ./backend/docker/mime.types /etc/
# Install non-dev base dependencies into a virtual env.
COPY --chown=$UID:$GID ./backend/requirements/base.txt /baserow/requirements/
RUN python3 -m venv /baserow/venv
RUN . /baserow/venv/bin/activate && pip3 install -r /baserow/requirements/base.txt
# Build a dev_deps stage which also has the dev dependencies for use by the dev layer.
FROM base as dev_deps
COPY ./backend/requirements/dev.txt /baserow/requirements/
RUN . /baserow/venv/bin/activate && pip3 install -r /baserow/requirements/dev.txt
# The core stage contains all of Baserows source code and sets up the entrypoint
FROM base as core
# Copy over backend code.
COPY --chown=$UID:$GID ./docs /baserow/docs
# TODO - This copy also re-copies the requirements above, meaning this will be re-run
# and not cached even though we already have separate layers above.
COPY --chown=$UID:$GID ./backend /baserow/backend
COPY --chown=$UID:$GID ./premium/backend /baserow/premium/backend
@ -42,10 +57,24 @@ WORKDIR /baserow/backend
# the application rather than buffering it.
ENV PYTHONUNBUFFERED 1
ENV PYTHONPATH $PYTHONPATH:/baserow/backend/src:/baserow/premium/backend/src
ENV DJANGO_SETTINGS_MODULE='baserow.config.settings.base'
ENTRYPOINT ["/usr/bin/tini", "--", "/bin/bash", "/baserow/backend/docker/docker-entrypoint.sh"]
EXPOSE 8000
RUN dos2unix /baserow/backend/docker/docker-entrypoint.sh && \
chmod a+x /baserow/backend/docker/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/tini", "--", "/bin/bash", "/baserow/backend/docker/docker-entrypoint.sh"]
FROM core as dev
# Override virtualenv with one containing dev dependencies.
COPY --chown=$UID:$GID --from=dev_deps /baserow/venv /baserow/venv
# Override env variables and initial cmd to start up in dev mode.
ENV DJANGO_SETTINGS_MODULE='baserow.config.settings.dev'
CMD ["dev"]
FROM core as local
ENV DJANGO_SETTINGS_MODULE='baserow.config.settings.base'
CMD ["local"]

View file

@ -16,5 +16,37 @@ format:
test:
pytest tests ../premium/backend/tests || exit;
test-regenerate-ci-durations:
pytest tests ../premium/backend/tests --store-durations || exit;
test-parallel:
pytest tests ../premium/backend/tests -n 10 || exit;
PYTEST_SPLITS:=1
PYTEST_SPLIT_GROUP:=1
ci-test-python:
mkdir reports/ -p; \
cd ..; \
COVERAGE_FILE=backend/reports/.coverage.$(PYTEST_SPLIT_GROUP) \
coverage run \
--rcfile=backend/.coveragerc \
-m pytest \
--durations-path=backend/.test_durations \
--splits $(PYTEST_SPLITS) \
--group $(PYTEST_SPLIT_GROUP) \
--junitxml=backend/reports/report.xml \
backend/tests \
premium/backend/tests;
generate-html-coverage-report:
mkdir html_coverage/ -p; \
cd ..; \
coverage run --rcfile=backend/.coveragerc -m pytest \
backend/tests \
premium/backend/tests; \
coverage html -d html_coverage/;
ci-check-startup-python:
timeout --preserve-status 10s \
gunicorn --workers=1 -b 0.0.0.0:8002 \
-k uvicorn.workers.UvicornWorker baserow.config.asgi:application;

View file

@ -1,55 +0,0 @@
FROM python:3.7-slim-buster
# Default to 1000 as this is probably the running users UID.
ARG UID
ENV UID=${UID:-1000}
ARG GID
ENV GID=${GID:-1000}
# We might be running as a user which already exists in this image. In that situation
# Everything is OK and we should just continue on.
RUN groupadd -g $GID baserow_docker_group || exit 0
RUN useradd --shell /bin/bash -u $UID -g $GID -o -c "" -m baserow_docker_user || exit 0
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
curl \
gnupg2 \
libpq-dev \
dos2unix \
tini \
postgresql-client \
gettext \
&& apt-get autoclean \
&& apt-get clean \
&& apt-get autoremove \
&& rm -rf /var/lib/apt/lists/*
USER $UID:$GID
COPY --chown=$UID:$GID ./backend/requirements /baserow/requirements
# In slim docker images, mime.types is removed and we need it for mimetypes guessing
COPY --chown=$UID:$GID ./backend/docker/mime.types /etc/
# Disable the path warn as we set the correct path in the entrypoint when it is easy
# to know the users $HOME/.local/bin location. Changing path in the docker image does
# not work as we do not know where $HOME when using an ENV command.
RUN pip3 install --no-warn-script-location -r /baserow/requirements/base.txt -r /baserow/requirements/dev.txt
COPY --chown=$UID:$GID ./docs /baserow/docs
COPY --chown=$UID:$GID ./backend /baserow/backend
COPY --chown=$UID:$GID ./premium/backend /baserow/premium/backend
WORKDIR /baserow/backend
# Ensure that Python outputs everything that's printed inside
# the application rather than buffering it.
ENV PYTHONUNBUFFERED 1
ENV PYTHONPATH $PYTHONPATH:/baserow/backend/src:/baserow/premium/backend/src
ENV DJANGO_SETTINGS_MODULE='baserow.config.settings.dev'
RUN dos2unix /baserow/backend/docker/docker-entrypoint.sh && \
chmod a+x /baserow/backend/docker/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/tini", "--", "/bin/bash", "/baserow/backend/docker/docker-entrypoint.sh"]
CMD ["dev"]

View file

@ -9,8 +9,7 @@ DATABASE_USER="${DATABASE_USER:-postgres}"
DATABASE_HOST="${DATABASE_HOST:-db}"
DATABASE_PORT="${DATABASE_PORT:-5432}"
# Ensure the installed python dependencies are on the path and available.
export PATH="$PATH:$HOME/.local/bin"
source "/baserow/venv/bin/activate"
postgres_ready() {
python << END
@ -43,18 +42,24 @@ done
show_help() {
# If you change this please update ./docs/reference/baserow-docker-api.md
echo """
Usage: docker run <imagename> COMMAND
Usage: docker run [-T] baserow_backend[_dev] COMMAND
Commands
local : Start django using a prod ready gunicorn server
dev : Start a normal Django development server
bash : Start a bash shell
manage : Start manage.py
python : Run a python command
shell : Start a Django Python shell
celery : Run celery
celery-dev: Run a hot-reloading dev version of celery
lint: : Run the linting
help : Show this message
local : Start django using a prod ready gunicorn server
dev : Start a normal Django development server
exec : Exec a command directly
bash : Start a bash shell
manage : Start manage.py
setup : Runs all setup commands (migrate, update_formulas, sync_templates)
python : Run a python command
shell : Start a Django Python shell
celery : Run celery
celery-dev: : Run a hot-reloading dev version of celery
lint: : Run the linting (only available if using dev target)
lint-exit : Run the linting and exit (only available if using dev target)
test: : Run the tests (only available if using dev target)
ci-test: : Run the tests for ci including various reports (dev only)
ci-check-startup: Start up a single gunicorn and timeout after 10 seconds for ci (dev).
help : Show this message
"""
}
@ -87,6 +92,9 @@ case "$1" in
run_setup_commands_if_configured
exec gunicorn --workers=3 -b 0.0.0.0:"${PORT}" -k uvicorn.workers.UvicornWorker baserow.config.asgi:application
;;
exec)
exec "${@:2}"
;;
bash)
exec /bin/bash "${@:2}"
;;
@ -96,14 +104,31 @@ case "$1" in
python)
exec python "${@:2}"
;;
setup)
echo "python /baserow/backend/src/baserow/manage.py migrate"
python /baserow/backend/src/baserow/manage.py migrate
echo "python /baserow/backend/src/baserow/manage.py update_formulas"
python /baserow/backend/src/baserow/manage.py update_formulas
echo "python /baserow/backend/src/baserow/manage.py sync_templates"
python /baserow/backend/src/baserow/manage.py sync_templates
;;
shell)
exec python /baserow/backend/src/baserow/manage.py shell
;;
lint)
lint-shell)
CMD="make lint-python"
echo "$CMD"
exec bash --init-file <(echo "history -s $CMD; $CMD")
;;
lint)
exec make lint-python
;;
ci-test)
exec make ci-test-python PYTEST_SPLITS="${PYTEST_SPLITS:-1}" PYTEST_SPLIT_GROUP="${PYTEST_SPLIT_GROUP:-1}"
;;
ci-check-startup)
exec make ci-check-startup-python
;;
celery)
exec celery -A baserow "${@:2}"
;;
@ -123,7 +148,8 @@ case "$1" in
exec bash --init-file <(echo "history -s $CMD; $CMD")
;;
*)
echo "${@:2}"
show_help
exit 1
;;
esac
esac

View file

@ -18,3 +18,6 @@ django-silk==4.2.0
django-extensions==3.1.5
snoop==0.4.1
openapi-spec-validator==0.4.0
pytest-html==3.1.1
coverage==6.2
pytest-split==0.6.0

View file

@ -28,6 +28,7 @@
* Fix Django's default index naming scheme causing index name collisions.
* Workaround bug in Django's schema editor sometimes causing incorrect transaction
rollbacks resulting in the connection to the database becoming unusable.
* Rework Baserow docker images so they can be built and tested by gitlab CI.
## Released (2022-01-13 1.8.2)

6
dev.sh
View file

@ -232,6 +232,10 @@ else
echo "./dev.sh Using the already set value for the env variable SYNC_TEMPLATES_ON_STARTUP = $SYNC_TEMPLATES_ON_STARTUP"
fi
# Enable buildkit for faster builds with better caching.
export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
echo "./dev.sh running docker-compose commands:
------------------------------------------------
"
@ -274,5 +278,5 @@ if [ "$dont_attach" != true ] && [ "$up" = true ] ; then
"/bin/bash /baserow/web-frontend/docker/docker-entrypoint.sh lint-fix"
launch_tab_and_exec "backend lint" \
"backend" \
"/bin/bash /baserow/backend/docker/docker-entrypoint.sh lint"
"/bin/bash /baserow/backend/docker/docker-entrypoint.sh lint-shell"
fi

View file

@ -1,4 +1,4 @@
version: "3"
version: "3.4"
services:
@ -8,8 +8,7 @@ services:
backend:
build:
context: .
dockerfile: ./backend/docker/Dockerfile.dev
target: dev
args:
# We allow configuring the UID/GID here so you can run as the dev's actual user
# reducing the chance the containers screw up the bind mounted folders.
@ -26,8 +25,7 @@ services:
celery:
image: baserow_backend_dev:latest
build:
context: .
dockerfile: ./backend/docker/Dockerfile.dev
target: dev
args:
# We allow configuring the UID/GID here so you can run as the dev's actual user
# reducing the chance the containers screw up the bind mounted folders.
@ -44,8 +42,7 @@ services:
celery-export-worker:
image: baserow_backend_dev:latest
build:
context: .
dockerfile: ./backend/docker/Dockerfile.dev
target: dev
args:
# We allow configuring the UID/GID here so you can run as the dev's actual user
# reducing the chance the containers screw up the bind mounted folders.
@ -62,8 +59,7 @@ services:
celery-beat-worker:
image: baserow_backend_dev:latest
build:
context: .
dockerfile: ./backend/docker/Dockerfile.dev
target: dev
args:
# We allow configuring the UID/GID here so you can run as the dev's actual user
# reducing the chance the containers screw up the bind mounted folders.
@ -79,8 +75,7 @@ services:
web-frontend:
build:
context: .
dockerfile: ./web-frontend/docker/Dockerfile.dev
target: dev
args:
# We allow configuring the UID/GID here so you can run as the dev's actual user
# reducing the chance the containers screw up the bind mounted folders.

View file

@ -1,4 +1,4 @@
version: "3"
version: "3.4"
services:
db:
@ -63,6 +63,9 @@ services:
celery:
image: baserow_backend:latest
build:
dockerfile: ./backend/Dockerfile
context: .
environment:
- ADDITIONAL_APPS
- EMAIL_SMTP
@ -73,9 +76,6 @@ services:
- EMAIL_SMTP_PASSWORD
- FROM_EMAIL
- DISABLE_ANONYMOUS_PUBLIC_VIEW_WS_CONNECTIONS
build:
dockerfile: ./backend/Dockerfile
context: .
command: celery worker -l INFO -Q celery
depends_on:
- backend

View file

@ -23,10 +23,8 @@ This whole directory is also added to the backend container.
source directory. This file is registered as a command via the `setup.py`. When
someone adds Baserow as a dependency they can use the command `baserow migrate` which
is the same as `python src/baserow/manage.py migrate`.
* `Dockerfile`: the dockerfile that is used to build the image of the
backend for running baserow on your local machine.
* `docker/Dockerfile.dev`: the dockerfile that is used to build the development image
of the backend.
* `Dockerfile`: Builds an image containing just the backend service, build with
`--target dev` to instead get a dev ready image.
* `Makefile`: contains a few commands to install the dependencies, run the linter and
run the tests.
* `pytest.ini`: pytest configuration when running the tests.
@ -85,10 +83,8 @@ web frontend. This whole directory is also added to the web-frontend container.
* `.eslintrc.js`: the configuration for the eslint linter.
* `.prettierrc`: configuration for prettier.
* `.stylelintrc`: configuration for stylelint which lints the scss code.
* `Dockerfile`: the dockerfile that is used to build the image of the
web-frontend for running baserow on your local machine.
* `docker/Dockerfile.dev`: the dockerfile that is used to build the development image
of the web-frontend.
* `Dockerfile`: Builds an image containing just the web-frontend service, build with
`--target dev` to instead get a dev ready image.
* `intellij-idea.webpack.config.js` a webpack config file that can be used by Intellij
iDEA. It adds the correct aliases for the editor.
* `jest.config.js`: config file for running the tests with JEST.

View file

@ -12,9 +12,9 @@ Below are the files used by our docker setup and what they are responsible for:
- `docker-compose.yml`: A compose file which starts Baserow in local mode with no
development features enabled.
- `./backend/Dockerfile`: The backend's Dockerfile for local mode. See below for
- `./backend/Dockerfile`: The backend's Dockerfile. See below for
supported command line arguments. Also used to run the celery worker.
- `./web-frontend/Dockerfile`: The web-frontend's Dockerfile for local mode. See below
- `./web-frontend/Dockerfile`: The web-frontend's Dockerfile. See below
for supported command line arguments.
- `./media/Dockerfile`: A simple nginx image used to serve uploaded user files only.
@ -22,34 +22,41 @@ Below are the files used by our docker setup and what they are responsible for:
- `docker-compose.dev.yml`: A compose file which overrides parts of `docker-compose.yml`
to enable development features, do not use this in production.
- `./backend/docker/Dockerfile.dev`: The backends's Dockerfile for dev mode.
- `./web-frontend/docker/Dockerfile.dev`: The web-frontend's Dockerfile for dev mode.
- `./backend/docker/Dockerfile`: Build with `--target dev` to get the dev version.
- `./web-frontend/docker/Dockerfile`: Build with `--target dev` to get the dev version.
### For Both Envs
- `./backend/docker/docker-entrypoint.sh`: The entrypoint script used for both of the
backend images.
- `./web-frontend/docker/docker-entrypoint.sh`: The entrypoint script used for both of
the web-frontend images.
- `./backend/docker/docker-entrypoint.sh`: The entrypoint script used by the backend
Dockerfile, provides a set of commonly used commands for working with baserow.
- `./web-frontend/docker/docker-entrypoint.sh`: The entrypoint script used by the
web-frontend Dockerfile, provides a set of commonly used commands for working
with Baserow.
## Backend Image CLI
The `baserow_backend` and `baserow_backend_dev` images provide various commands used to
change what process is started inside the container.
```bash
Usage: docker run <imagename> COMMAND
```txt
Usage: docker run [-T] baserow_backend[_dev] COMMAND
Commands
local : Start django using a prod ready gunicorn server
dev : Start a normal Django development server
bash : Start a bash shell
manage : Start manage.py
python : Run a python command
shell : Start a Django Python shell
celery : Run celery
celery-dev: Run a hot-reloading dev version of celery
lint: : Run the linting
help : Show this message
local : Start django using a prod ready gunicorn server
dev : Start a normal Django development server
exec : Exec a command directly.
bash : Start a bash shell
manage : Start manage.py
setup : Runs all setup commands (migrate, update_formulas, sync_templates)
python : Run a python command
shell : Start a Django Python shell
celery : Run celery
celery-dev: : Run a hot-reloading dev version of celery
lint: : Run the linting (only available if using dev target)
lint-exit : Run the linting and exit (only available if using dev target)
test: : Run the tests (only available if using dev target)
ci-test: : Run the tests for ci including various reports (dev only)
ci-check-startup: Start up a single gunicorn and timeout after 10 seconds for ci (dev).
help : Show this message
```
You can run one of these as a one off command like so:
@ -66,13 +73,18 @@ $ ./dev.sh run backend COMMAND
The `baserow_web-frontend` and `baserow_web-frontend_dev` images provide various
commands used to change what process is started inside the container.
```bash
Usage: docker run <imagename> COMMAND
```txt
Usage: docker run [-T] baserow_web-frontend[_dev] COMMAND
Commands
dev : Start a normal nuxt development server
local : Start a non-dev prod ready nuxt server
lint : Run the linting
lint : Run all the linting
lint-fix : Run eslint fix
stylelint: Run stylelint
eslint : Run eslint
test : Run jest tests
ci-test : Run ci tests with reporting
exec : Exec a command directly.
bash : Start a bash shell
help : Show this message
```

View file

@ -1,4 +1,4 @@
FROM node:12-buster
FROM node:12-buster as base
ARG UID
ENV UID=${UID:-9999}
@ -38,7 +38,6 @@ RUN yarn install
COPY --chown=$UID:$GID ./web-frontend /baserow/web-frontend/
COPY --chown=$UID:$GID ./premium/web-frontend /baserow/premium/web-frontend/
RUN yarn run build-local
RUN dos2unix /baserow/web-frontend/docker/docker-entrypoint.sh && \
chmod a+x /baserow/web-frontend/docker/docker-entrypoint.sh
@ -46,5 +45,16 @@ RUN dos2unix /baserow/web-frontend/docker/docker-entrypoint.sh && \
# tini installed above protects us from zombie processes and ensures the default signal
# handlers work, see https://github.com/krallin/tini.
ENTRYPOINT ["/usr/bin/tini", "--", "/bin/bash", "/baserow/web-frontend/docker/docker-entrypoint.sh"]
EXPOSE 3000
FROM base as dev
# We don't bother running build-local in dev mode as it pre-compiles nuxt which won't
# be used when running the nuxt dev server.
CMD ["dev"]
FROM base as local
RUN yarn run build-local
CMD ["local"]

View file

@ -12,9 +12,15 @@ lint: eslint stylelint
lint-javascript: lint
jest:
yarn run jest || exit;
npx jest || exit;
test: jest
unit-test:
npx jest --selectProjects unit --selectProjects premium || exit;
ci-test-javascript:
JEST_JUNIT_OUTPUT_DIR=../reports/ npx jest --ci --collectCoverage --coverageDirectory="./reports/coverage/" || exit;
unit-test-watch:
yarn run jest test/unit --watch || exit;
npx jest test/unit --watch || exit;

View file

@ -1,47 +0,0 @@
FROM node:12-buster
ARG UID
ENV UID=${UID:-1000}
ARG GID
ENV GID=${GID:-1000}
# Perform all OS package installation and cleanup in one single command to reduce the
# size of the created layer.
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
curl \
gnupg2 \
dos2unix \
tini \
&& apt-get autoclean \
&& apt-get clean \
&& apt-get autoremove \
&& rm -rf /var/lib/apt/lists/*
# The node image already creates a non-root user to run as, update its ids so they
# match the provided UID and GID we wish to build and run this image with.
# If GID or UID already exist that's OK no need to stop the build.
RUN groupmod -g ${GID} node || exit 0
RUN usermod -u ${UID} -g ${GID} node || exit 0
USER $UID:$GID
# Create and install the dependencies in separate COPY commands
COPY --chown=$UID:$GID ./web-frontend/package.json ./web-frontend/yarn.lock /baserow/web-frontend/
WORKDIR /baserow/web-frontend
RUN yarn install
COPY --chown=$UID:$GID ./web-frontend /baserow/web-frontend/
COPY --chown=$UID:$GID ./premium/web-frontend /baserow/premium/web-frontend/
RUN dos2unix /baserow/web-frontend/docker/docker-entrypoint.sh && \
chmod a+x /baserow/web-frontend/docker/docker-entrypoint.sh
# tini installed above protects us from zombie processes and ensures the default signal
# handlers work, see https://github.com/krallin/tini.
ENTRYPOINT ["/usr/bin/tini", "--", "/bin/bash", "/baserow/web-frontend/docker/docker-entrypoint.sh"]
CMD ["dev"]

View file

@ -6,13 +6,18 @@ set -euo pipefail
show_help() {
# If you change this please update ./docs/reference/baserow-docker-api.md
echo """
Usage: docker run <imagename> COMMAND
Usage: docker run [-T] baserow_web-frontend[_dev] COMMAND
Commands
dev : Start a normal nuxt development server
local : Start a non-dev prod ready nuxt server
lint : Run the linting
lint : Run all the linting
lint-fix : Run eslint fix
stylelint: Run stylelint
eslint : Run eslint
test : Run jest tests
ci-test : Run ci tests with reporting
bash : Start a bash shell
exec : Exec a command directly
help : Show this message
"""
}
@ -38,11 +43,27 @@ case "$1" in
echo "$CMD"
exec bash --init-file <(echo "history -s $CMD; $CMD")
;;
eslint)
exec make eslint
;;
stylelint)
exec make eslint
;;
test)
exec make jest
;;
ci-test)
exec make ci-test-javascript
;;
exec)
exec "${@:2}"
;;
bash)
exec /bin/bash "${@:2}"
;;
*)
echo "${@:2}"
show_help
exit 1
;;
esac
esac

View file

@ -1,9 +1,11 @@
// The main jest config file used to run all of our tests.
// Setting reporters on the command line does not work so enable via this env variable
// we have to set anyway when using the junit reporter in CI.
// When JEST_JUNIT_OUTPUT_DIR is set (i.e. in CI) this adds the jest-junit
// reporter alongside jest's default reporter; otherwise it is an empty object
// so spreading it into the config below leaves reporters at their default.
const junitReporterConfig = process.env.JEST_JUNIT_OUTPUT_DIR
? {
reporters: ['default', '<rootDir>/web-frontend/node_modules/jest-junit'],
}
: {}
module.exports = {
// The rootDir used by jest must be the root of the repository so the premium tests
// and frontend code are contained within jest's rootDir. This is because:
// - Jest cannot collect coverage for files outside of its rootDir
// - Jest struggles to run tests which are outside of its rootDir.
rootDir: '..',
roots: ['<rootDir>/web-frontend/', '<rootDir>/premium/web-frontend'],
moduleDirectories: ['<rootDir>/web-frontend/node_modules/'],
@ -13,4 +15,18 @@ module.exports = {
'<rootDir>/premium/web-frontend/test/unit',
'<rootDir>/web-frontend/test/server',
],
coverageReporters: [
'text-summary',
['cobertura', { projectRoot: '/baserow/' }],
],
collectCoverageFrom: [
'<rootDir>/premium/web-frontend/modules/**/*.{js,Vue,vue}',
'<rootDir>/web-frontend/modules/**/*.{js,Vue,vue}',
'!**/node_modules/**',
'!**/.nuxt/**',
'!**/reports/**',
'!**/test/**',
'!**/generated/**',
],
...junitReporterConfig,
}

View file

@ -64,6 +64,7 @@
"eslint-plugin-vue": "^7.14.0",
"flush-promises": "^1.0.2",
"jest": "^26.6.3",
"jest-junit": "^13.0.0",
"jest-serializer-vue": "^2.0.2",
"jsdom": "^16.6.0",
"jsdom-global": "^3.0.2",

View file

@ -2423,6 +2423,11 @@ ansi-regex@^5.0.0:
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75"
integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==
ansi-regex@^5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
ansi-styles@^2.2.1:
version "2.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
@ -6925,6 +6930,16 @@ jest-jasmine2@^26.6.3:
pretty-format "^26.6.2"
throat "^5.0.0"
jest-junit@^13.0.0:
version "13.0.0"
resolved "https://registry.yarnpkg.com/jest-junit/-/jest-junit-13.0.0.tgz#479be347457aad98ae8a5983a23d7c3ec526c9a3"
integrity sha512-JSHR+Dhb32FGJaiKkqsB7AR3OqWKtldLd6ZH2+FJ8D4tsweb8Id8zEVReU4+OlrRO1ZluqJLQEETm+Q6/KilBg==
dependencies:
mkdirp "^1.0.4"
strip-ansi "^6.0.1"
uuid "^8.3.2"
xml "^1.0.1"
jest-leak-detector@^26.6.2:
version "26.6.2"
resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-26.6.2.tgz#7717cf118b92238f2eba65054c8a0c9c653a91af"
@ -11117,6 +11132,13 @@ strip-ansi@^6.0.0:
dependencies:
ansi-regex "^5.0.0"
strip-ansi@^6.0.1:
version "6.0.1"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
dependencies:
ansi-regex "^5.0.1"
strip-bom@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3"
@ -12033,7 +12055,7 @@ uuid@^3.3.2:
resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee"
integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==
uuid@^8.3.0:
uuid@^8.3.0, uuid@^8.3.2:
version "8.3.2"
resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2"
integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==
@ -12534,6 +12556,11 @@ xml-name-validator@^3.0.0:
resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a"
integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==
xml@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/xml/-/xml-1.0.1.tgz#78ba72020029c5bc87b8a81a3cfcd74b4a2fc1e5"
integrity sha1-eLpyAgApxbyHuKgaPPzXS0ovweU=
xmlchars@^2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.2.0.tgz#060fe1bcb7f9c76fe2a17db86a9bc3ab894210cb"