291 changes: 10 additions & 281 deletions .githooks/pre-commit
@@ -1,289 +1,18 @@
#!/usr/bin/env bash
#
# Git pre-commit hook that delegates to the pre-commit framework.
# This file is kept for backwards compatibility with existing dev setups.
#
# To set up: git config core.hooksPath .githooks
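# Requires the pre-commit framework to be installed (https://pre-commit.com), e.g. via `pip install pre-commit`.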
#

set -Eeou pipefail

source scripts/dev/set_env_context.sh
source scripts/funcs/printing

if [ -f "${PROJECT_DIR}"/venv/bin/activate ]; then
source "${PROJECT_DIR}"/venv/bin/activate
if [ -f "${PROJECT_DIR}/venv/bin/activate" ]; then
source "${PROJECT_DIR}/venv/bin/activate"
fi

if [[ -z "${EVERGREEN_MODE:-}" ]]; then
# Per the latest SSDLC recommendations, CI must always check all files, not just the delta.
git_last_changed=$(git ls-tree -r origin/master --name-only)
else
git_last_changed=$(git diff --cached --name-only --diff-filter=ACM origin/master)
fi

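# Ensure the Go bin directory exists; some of the checks below may install Go tooling there.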
mkdir -p "$(go env GOPATH)/bin"

update_mco_tests() {
echo "Regenerating MCO evergreen tests configuration"
python scripts/evergreen/e2e/mco/create_mco_tests.py >.evergreen-mco.yml
git add .evergreen-mco.yml
}

# Generates YAML manifests for installing the operator from the Helm chart sources.
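# Can also be invoked directly: .githooks/pre-commit generate_standalone_yaml [helm options]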
generate_standalone_yaml() {
HELM_OPTS=("$@") # collect any extra helm options as an array

charttmpdir=$(mktemp -d 2>/dev/null || mktemp -d -t 'charttmpdir')
charttmpdir=${charttmpdir}/chart
mkdir -p "${charttmpdir}"

FILES=(
"${charttmpdir}/mongodb-kubernetes/templates/operator-roles-base.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/operator-roles-clustermongodbroles.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/operator-roles-pvc-resize.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/operator-roles-telemetry.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/operator-roles-webhook.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/database-roles.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/operator-sa.yaml"
"${charttmpdir}/mongodb-kubernetes/templates/operator.yaml"
)

# generate normal public example
helm template --namespace mongodb -f helm_chart/values.yaml helm_chart --output-dir "${charttmpdir}" ${HELM_OPTS[@]}
cat "${FILES[@]}" >public/mongodb-kubernetes.yaml
cat "helm_chart/crds/"* >public/crds.yaml

# generate openshift public example
rm -rf "${charttmpdir:?}"/*
helm template --namespace mongodb -f helm_chart/values.yaml helm_chart --output-dir "${charttmpdir}" --values helm_chart/values-openshift.yaml ${HELM_OPTS[@]}
cat "${FILES[@]}" >public/mongodb-kubernetes-openshift.yaml

# generate openshift files for kustomize used for generating OLM bundle
rm -rf "${charttmpdir:?}"/*
helm template --namespace mongodb -f helm_chart/values.yaml helm_chart --output-dir "${charttmpdir}" --values helm_chart/values-openshift.yaml \
--set operator.webhook.registerConfiguration=false --set operator.webhook.installClusterRole=false ${HELM_OPTS[@]}

# update kustomize files for OLM bundle with files generated for openshift
cp "${charttmpdir}/mongodb-kubernetes/templates/operator.yaml" config/manager/manager.yaml
cp "${charttmpdir}/mongodb-kubernetes/templates/database-roles.yaml" config/rbac/database-roles.yaml
cp "${charttmpdir}/mongodb-kubernetes/templates/operator-roles-base.yaml" config/rbac/operator-roles-base.yaml
cp "${charttmpdir}/mongodb-kubernetes/templates/operator-roles-clustermongodbroles.yaml" config/rbac/operator-roles-clustermongodbroles.yaml
cp "${charttmpdir}/mongodb-kubernetes/templates/operator-roles-pvc-resize.yaml" config/rbac/operator-roles-pvc-resize.yaml
cp "${charttmpdir}/mongodb-kubernetes/templates/operator-roles-telemetry.yaml" config/rbac/operator-roles-telemetry.yaml

# generate multi-cluster public example
rm -rf "${charttmpdir:?}"/*
helm template --namespace mongodb -f helm_chart/values.yaml helm_chart --output-dir "${charttmpdir}" --values helm_chart/values-multi-cluster.yaml ${HELM_OPTS[@]}
cat "${FILES[@]}" >public/mongodb-kubernetes-multi-cluster.yaml

}

python_formatting() {
# install black (and the rest of requirements.txt) if it is not already available
if ! command -v "black" >/dev/null; then
pip install -r requirements.txt
fi

echo "running isort"
isort .
echo "running black"
black .
}

generate_manifests() {
make manifests

git add config/crd/bases
git add helm_chart/crds
git add public/crds.yaml
}

update_values_yaml_files() {
# ensure that all helm values files are up to date.
# shellcheck disable=SC2154
python scripts/evergreen/release/update_helm_values_files.py

# commit any changes we made
git add helm_chart/values.yaml
git add helm_chart/values-openshift.yaml

# these can change if the version of community operator is different
git add go.mod
git add go.sum
}

update_release_json() {
# ensure that release.json is up to date
# shellcheck disable=SC2154
python scripts/evergreen/release/update_release.py

# commit any changes we made
git add release.json
}

regenerate_public_rbac_multi_cluster() {
if echo "$git_last_changed" | grep -q -e 'cmd/kubectl-mongodb' -e 'pkg/kubectl-mongodb'; then
echo 'regenerating multicluster RBAC public example'
pushd pkg/kubectl-mongodb/common/
EXPORT_RBAC_SAMPLES="true" go test ./... -run TestPrintingOutRolesServiceAccountsAndRoleBindings
popd
git add public/samples/multi-cluster-cli-gitops
fi
}

update_licenses() {
if [[ "${MDB_UPDATE_LICENSES:-""}" == "true" ]]; then
echo 'regenerating licenses'
time scripts/evergreen/update_licenses.sh 2>&1 | prepend "update_licenses"
git add LICENSE-THIRD-PARTY
fi
}

check_erroneous_kubebuilder_annotations() {
# Makes sure there are no erroneous kubebuilder annotations that could
# end up in CRDs as descriptions.
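# (Valid markers start with "// +kubebuilder:"; without the "+" they are plain comments and get picked up as documentation.)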
if grep "// kubebuilder" ./* -r --exclude-dir=vendor --include=\*.go; then
echo -e "${RED}Found an erroneous kubebuilder annotation${NO_COLOR}"
exit 1
fi
}

check_incorrect_makefile_variable_brackets() {
if find . -name "Makefile" | grep -v vendor | xargs grep "\${"; then
echo -e "${RED}ERROR: Makefiles should NEVER contain curly-bracket variables${NO_COLOR}"
exit 1
fi
}

update_jobs() {
# Update release.json first in case there is a newer version
time update_release_json
# We need to generate the values files first
time update_values_yaml_files
# The values files are used for generating the standalone yaml
time generate_standalone_yaml
}

lint_code() {
scripts/evergreen/lint_code.sh
}

lint_helm_chart() {
scripts/dev/lint_helm_chart.sh
}

validate_snippets() {
scripts/code_snippets/validate_snippets.py
}

# bg_job_* variables are global; run_job_in_background appends to them on each call
bg_job_pids=()
bg_job_pids_with_names=()

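# Looks up the job name recorded for a given PID in bg_job_pids_with_names ("pid:name" entries).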
get_job_name() {
local search_pid="$1"
local match
match=$(printf '%s\n' "${bg_job_pids_with_names[@]}" | grep "^${search_pid}:")
echo "${match#*:}" # Remove everything up to and including the colon
}

# Executes the function named by the first argument as a background job.
# It ensures logs are prefixed with the job's name and records the job's PID
# in the bg_job_pids array so we can wait for completion.
run_job_in_background() {
local job_name=$1
time ${job_name} 2>&1 | prepend "${job_name}" &

local job_pid=$!
bg_job_pids+=("${job_pid}")
bg_job_pids_with_names+=("${job_pid}:${job_name}")
echo "Started ${job_name} with PID: ${job_pid}"
}

# Waits for all background jobs stored in bg_job_pids and checks their exit codes.
wait_for_all_background_jobs() {
failures=()
for pid in "${bg_job_pids[@]}"; do
wait "${pid}" || {
job_name=$(get_job_name "${pid}")
failures+=(" ${RED}${job_name} (PID ${pid})${NO_COLOR}")
}
done

if [[ ${#failures[@]} -gt 0 ]]; then
echo -e "${RED}Some checks have failed:${NO_COLOR}"
for failure in "${failures[@]}"; do
echo -e "$failure"
done
echo -e "${RED}To see the details, look for or filter the job's logs by their name prefix (e.g. \"start_shellcheck:\", \"lint_code:\", \"shellcheck failed\").${NO_COLOR}"
return 1
fi

return 0
}

pre_commit() {
run_job_in_background "update_jobs"
run_job_in_background "update_licenses"
run_job_in_background "lint_code"
run_job_in_background "start_shellcheck"
run_job_in_background "regenerate_public_rbac_multi_cluster"
run_job_in_background "python_formatting"
run_job_in_background "check_erroneous_kubebuilder_annotations"
run_job_in_background "validate_snippets"

if wait_for_all_background_jobs; then
# lint_helm_chart must run after all the background jobs have finished because one of them
# (update_jobs) updates the helm chart, and lint_helm_chart requires the chart to be up to date.
lint_helm_chart

local lint_helm_chart_status=$?
if [ "$lint_helm_chart_status" -eq 0 ]; then
echo -e "${GREEN}pre-commit: All checks passed!${NO_COLOR}"
return 0
else
return 1
fi
else
return 1
fi
}

# Function to run shellcheck on a single file
run_shellcheck() {
local file="$1"

local diff_output
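# `shellcheck -f diff` emits any auto-fixable suggestions as a unified diff,
# which is applied to the working tree via `git apply` below.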
diff_output=$(shellcheck -f diff "$file" -e SC2154 -e SC1091 -e SC1090 -e SC2148 -o require-variable-braces -P "scripts" 2>&1)

if [[ -n "$diff_output" && "$diff_output" != *"Issues were detected, but none were auto-fixable"* ]]; then
echo "$diff_output" | git apply
echo "Applied auto-fixes for $file"
elif [[ "$diff_output" == *"Issues were detected, but none were auto-fixable"* ]]; then
echo -e "${RED}shellcheck failed on $file${NO_COLOR}"
shellcheck --color=always -x "$file" -e SC2154 -e SC1091 -e SC1090 -e SC2148 -o require-variable-braces -P "scripts"
return 1
fi
}

# Export function so it's available in subshells (for xargs)
export -f run_shellcheck

start_shellcheck() {
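# Lints the relevant shell scripts in parallel (up to 20 at a time).
# The `_` passed to `bash -c` fills $0, so each file path arrives as $1.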
# shellcheck disable=SC2016
{
find scripts -type f -name "*.sh"
find scripts/dev/contexts -type f | grep -v private-context
find scripts/funcs -type f
find public/architectures -type f -name "*.sh"
find docs/ -type f -name "*.sh"
} | xargs -I {} -P 20 bash -c 'run_shellcheck "$1"' _ {}
}

cmd=${1:-"pre-commit"}

if [[ "${cmd}" == "generate_standalone_yaml" ]]; then
shift 1
generate_standalone_yaml "$@"
elif [[ "${cmd}" == "pre-commit" ]]; then
time pre_commit
elif [[ "${cmd}" == "shellcheck" ]]; then
start_shellcheck
elif [[ "${cmd}" == "lint" ]]; then
lint_code
fi
# Run pre-commit framework
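# (exec replaces this hook's shell, so pre-commit's exit status becomes the hook's.)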
exec pre-commit run --all-files