From 9f9b1849eb2113398df1a88f6b0d5380317f799e Mon Sep 17 00:00:00 2001 From: mr Date: Wed, 12 Nov 2025 13:13:43 +0100 Subject: [PATCH] Update K8S to include an auto generator of values template --- .gitignore | 3 +- README.md | 66 +- build_opencloud_microservices.sh | 18 - clone_opencloud_microservices.sh | 47 - create_kind_cluster.sh | 32 - delete_kind_cluster.sh | 3 - deployed_config | 9 +- install.sh | 8 - oc-k8s.sh | 267 ++++ opencloud/Chart.yaml | 4 + opencloud/README.md | 129 ++ opencloud/charts/prometheus/.helmignore | 24 + opencloud/charts/prometheus/Chart.lock | 15 + opencloud/charts/prometheus/Chart.yaml | 58 + opencloud/charts/prometheus/README.md | 392 +++++ .../charts/alertmanager/.helmignore | 26 + .../prometheus/charts/alertmanager/Chart.yaml | 26 + .../prometheus/charts/alertmanager/README.md | 62 + .../charts/alertmanager/templates/NOTES.txt | 21 + .../alertmanager/templates/_helpers.tpl | 81 + .../alertmanager/templates/configmap.yaml | 21 + .../alertmanager/templates/ingress.yaml | 47 + .../templates/ingressperreplica.yaml | 56 + .../charts/alertmanager/templates/pdb.yaml | 14 + .../templates/serviceaccount.yaml | 14 + .../templates/serviceperreplica.yaml | 44 + .../alertmanager/templates/services.yaml | 75 + .../alertmanager/templates/statefulset.yaml | 280 ++++ .../templates/tests/test-connection.yaml | 20 + .../charts/alertmanager/templates/vpa.yaml | 26 + .../charts/alertmanager/values.schema.json | 972 ++++++++++++ .../charts/alertmanager/values.yaml | 440 ++++++ .../charts/kube-state-metrics/.helmignore | 21 + .../charts/kube-state-metrics/Chart.yaml | 29 + .../charts/kube-state-metrics/README.md | 87 ++ .../kube-state-metrics/templates/NOTES.txt | 23 + .../kube-state-metrics/templates/_helpers.tpl | 186 +++ .../templates/ciliumnetworkpolicy.yaml | 33 + .../templates/clusterrolebinding.yaml | 20 + .../templates/crs-configmap.yaml | 16 + .../templates/deployment.yaml | 379 +++++ .../templates/extra-manifests.yaml | 4 + .../templates/kubeconfig-secret.yaml | 12 + .../templates/networkpolicy.yaml | 43 + .../kube-state-metrics/templates/pdb.yaml | 14 + .../templates/rbac-configmap.yaml | 22 + .../kube-state-metrics/templates/role.yaml | 236 +++ .../templates/rolebinding.yaml | 24 + .../templates/scrapeconfig.yaml | 60 + .../kube-state-metrics/templates/service.yaml | 53 + .../templates/serviceaccount.yaml | 18 + .../templates/servicemonitor.yaml | 120 ++ .../templates/stsdiscovery-role.yaml | 26 + .../templates/stsdiscovery-rolebinding.yaml | 17 + .../templates/verticalpodautoscaler.yaml | 44 + .../charts/kube-state-metrics/values.yaml | 611 ++++++++ .../prometheus-node-exporter/.helmignore | 23 + .../prometheus-node-exporter/Chart.yaml | 28 + .../charts/prometheus-node-exporter/README.md | 96 ++ .../templates/NOTES.txt | 29 + .../templates/_helpers.tpl | 237 +++ .../templates/clusterrole.yaml | 19 + .../templates/clusterrolebinding.yaml | 20 + .../templates/daemonset.yaml | 349 +++++ .../templates/endpoints.yaml | 18 + .../templates/extra-manifests.yaml | 4 + .../templates/networkpolicy.yaml | 27 + .../templates/podmonitor.yaml | 91 ++ .../templates/rbac-configmap.yaml | 16 + .../templates/service.yaml | 41 + .../templates/serviceaccount.yaml | 18 + .../templates/servicemonitor.yaml | 65 + .../templates/verticalpodautoscaler.yaml | 40 + .../prometheus-node-exporter/values.yaml | 616 ++++++++ .../charts/prometheus-pushgateway/.helmignore | 26 + .../charts/prometheus-pushgateway/Chart.yaml | 27 + .../charts/prometheus-pushgateway/README.md | 101 ++ 
.../templates/NOTES.txt | 19 + .../templates/_helpers.tpl | 297 ++++ .../templates/deployment.yaml | 32 + .../templates/extra-manifests.yaml | 8 + .../templates/ingress.yaml | 41 + .../templates/networkpolicy.yaml | 26 + .../prometheus-pushgateway/templates/pdb.yaml | 14 + .../templates/pushgateway-pvc.yaml | 29 + .../templates/secret.yaml | 24 + .../templates/service.yaml | 45 + .../templates/serviceaccount.yaml | 17 + .../templates/servicemonitor.yaml | 58 + .../templates/statefulset.yaml | 51 + .../charts/prometheus-pushgateway/values.yaml | 393 +++++ .../charts/prometheus/templates/NOTES.txt | 118 ++ .../charts/prometheus/templates/_helpers.tpl | 180 +++ .../prometheus/templates/clusterrole.yaml | 45 + .../templates/clusterrolebinding.yaml | 16 + opencloud/charts/prometheus/templates/cm.yaml | 107 ++ .../charts/prometheus/templates/deploy.yaml | 488 ++++++ .../prometheus/templates/extra-manifests.yaml | 4 + .../prometheus/templates/headless-svc.yaml | 32 + .../prometheus/templates/httproute.yaml | 45 + .../charts/prometheus/templates/ingress.yaml | 47 + .../prometheus/templates/network-policy.yaml | 16 + .../charts/prometheus/templates/pdb.yaml | 26 + .../charts/prometheus/templates/pvc.yaml | 40 + .../prometheus/templates/rolebinding.yaml | 18 + .../charts/prometheus/templates/service.yaml | 66 + .../prometheus/templates/serviceaccount.yaml | 16 + .../charts/prometheus/templates/vpa.yaml | 22 + .../charts/prometheus/values.schema.json | 739 +++++++++ opencloud/charts/prometheus/values.yaml | 1380 +++++++++++++++++ opencloud/templates/oc-auth/deployment.yaml | 12 +- opencloud/templates/oc-auth/service.yaml | 27 +- .../templates/oc-catalog/deployment.yaml | 6 +- opencloud/templates/oc-catalog/service.yaml | 27 +- .../templates/oc-datacenter/deployment.yaml | 10 +- .../templates/oc-datacenter/service.yaml | 28 +- opencloud/templates/oc-front/deployment.yaml | 10 +- opencloud/templates/oc-front/service.yaml | 27 +- opencloud/templates/oc-peer/deployment.yaml | 10 +- opencloud/templates/oc-peer/service.yaml | 23 + .../templates/oc-scheduler/deployment.yaml | 8 +- opencloud/templates/oc-scheduler/service.yaml | 23 + .../templates/oc-schedulerd/deployment.yaml | 8 +- .../templates/oc-schedulerd/service.yaml | 40 + opencloud/templates/oc-shared/deployment.yaml | 10 +- opencloud/templates/oc-shared/service.yaml | 23 + .../templates/oc-workflow/deployment.yaml | 10 +- opencloud/templates/oc-workflow/service.yaml | 25 +- .../templates/oc-workspace/deployment.yaml | 8 +- opencloud/templates/oc-workspace/service.yaml | 23 + opencloud/templates/openCloudConf.yaml | 4 +- opencloud/templates/openldap.yaml | 27 + opencloud/templates/prometheus.yaml | 15 + .../templates/prometheus/deployment.yaml | 22 - opencloud/templates/prometheus/service.yaml | 11 - opencloud/values.yaml.template | 588 +++++++ opencloud/{ => values}/dev-values.yaml | 6 +- opencloud/{ => values}/prod-values.yaml | 0 opencloud/values/test-values.yaml | 588 +++++++ uninstall.sh | 7 - upgrade.sh | 5 - 141 files changed, 13168 insertions(+), 211 deletions(-) delete mode 100755 build_opencloud_microservices.sh delete mode 100755 clone_opencloud_microservices.sh delete mode 100755 create_kind_cluster.sh delete mode 100755 delete_kind_cluster.sh delete mode 100755 install.sh create mode 100755 oc-k8s.sh create mode 100644 opencloud/README.md create mode 100644 opencloud/charts/prometheus/.helmignore create mode 100644 opencloud/charts/prometheus/Chart.lock create mode 100644 opencloud/charts/prometheus/Chart.yaml create mode 100644 
opencloud/charts/prometheus/README.md create mode 100644 opencloud/charts/prometheus/charts/alertmanager/.helmignore create mode 100644 opencloud/charts/prometheus/charts/alertmanager/Chart.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/README.md create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/NOTES.txt create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/_helpers.tpl create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/configmap.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/ingress.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/ingressperreplica.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/pdb.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/serviceperreplica.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/services.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/statefulset.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/tests/test-connection.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/templates/vpa.yaml create mode 100644 opencloud/charts/prometheus/charts/alertmanager/values.schema.json create mode 100644 opencloud/charts/prometheus/charts/alertmanager/values.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/.helmignore create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/Chart.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/README.md create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/role.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/scrapeconfig.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/service.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml create mode 100644 
opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml create mode 100644 opencloud/charts/prometheus/charts/kube-state-metrics/values.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/.helmignore create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/Chart.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/README.md create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-node-exporter/values.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/.helmignore create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/Chart.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/README.md create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/extra-manifests.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/secret.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml create mode 100644 
opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml create mode 100644 opencloud/charts/prometheus/charts/prometheus-pushgateway/values.yaml create mode 100644 opencloud/charts/prometheus/templates/NOTES.txt create mode 100644 opencloud/charts/prometheus/templates/_helpers.tpl create mode 100644 opencloud/charts/prometheus/templates/clusterrole.yaml create mode 100644 opencloud/charts/prometheus/templates/clusterrolebinding.yaml create mode 100644 opencloud/charts/prometheus/templates/cm.yaml create mode 100644 opencloud/charts/prometheus/templates/deploy.yaml create mode 100644 opencloud/charts/prometheus/templates/extra-manifests.yaml create mode 100644 opencloud/charts/prometheus/templates/headless-svc.yaml create mode 100644 opencloud/charts/prometheus/templates/httproute.yaml create mode 100644 opencloud/charts/prometheus/templates/ingress.yaml create mode 100644 opencloud/charts/prometheus/templates/network-policy.yaml create mode 100644 opencloud/charts/prometheus/templates/pdb.yaml create mode 100644 opencloud/charts/prometheus/templates/pvc.yaml create mode 100644 opencloud/charts/prometheus/templates/rolebinding.yaml create mode 100644 opencloud/charts/prometheus/templates/service.yaml create mode 100644 opencloud/charts/prometheus/templates/serviceaccount.yaml create mode 100644 opencloud/charts/prometheus/templates/vpa.yaml create mode 100644 opencloud/charts/prometheus/values.schema.json create mode 100644 opencloud/charts/prometheus/values.yaml create mode 100644 opencloud/templates/oc-schedulerd/service.yaml create mode 100644 opencloud/templates/openldap.yaml create mode 100644 opencloud/templates/prometheus.yaml delete mode 100644 opencloud/templates/prometheus/deployment.yaml delete mode 100644 opencloud/templates/prometheus/service.yaml create mode 100644 opencloud/values.yaml.template rename opencloud/{ => values}/dev-values.yaml (98%) rename opencloud/{ => values}/prod-values.yaml (100%) create mode 100644 opencloud/values/test-values.yaml delete mode 100755 uninstall.sh delete mode 100755 upgrade.sh diff --git a/.gitignore b/.gitignore index ed4ae2d..7b99c26 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ opencloud/Chart.lock -deployed_config.sh \ No newline at end of file +deployed_config.sh +kind-logs \ No newline at end of file diff --git a/README.md b/README.md index 71e6f2c..45898ac 100644 --- a/README.md +++ b/README.md @@ -8,42 +8,63 @@ environment is a legitimate target. 
# Locally built microservices deployment procedure

-## Clone the repository
+## Install OC-K8S

```
git clone https://cloud.o-forge.io/plm/oc-k8s.git
+sudo cp oc-k8s.sh /usr/bin/oc-k8s
+sudo chmod +x /usr/bin/oc-k8s
```

## Install kind

Follow the instructions here https://kind.sigs.k8s.io/

+or
+
+```
+go install sigs.k8s.io/kind@v0.30.0 && kind create cluster
+```
+
## Install helm

Download a suitable helm client here https://helm.sh/docs/intro/install/

+## Quick start
+```
+oc-k8s start
+```
+or
+```
+./oc-k8s.sh start
+```
+To stop:
+```
+oc-k8s stop
+```
+or
+```
+./oc-k8s.sh stop
+```
## Fire up a kind cluster

-Execute following script to create a single node development k8s cluster
-
-```
-create_kind_cluster.sh
-```
-
WARNING: MAKE SURE APACHE & NGINX ARE NOT RUNNING:
- `sudo /etc/init.d/apache2 stop`
- `sudo nginx -s stop`
+
+Execute the following script to create a single-node development k8s cluster
+
+```
+oc-k8s create cluster
+```
+or
+```
+./oc-k8s.sh create cluster
+```
+
It will create an *opencloud* Docker container running Kubernetes services.

-## Clone all the microservices repositories taking part of the opencloud environment
-
-Execute following script to clone all the needed parts:
-
-```
-clone_opencloud_microservices.sh
-```
-
## Build everything

You need to build and publish all the opencloud microservices images in the kind cluster before deploying the Helm package.
@@ -51,13 +72,21 @@ You need to build and publish all the opencloud microservices images in the kind
Proceed as follows:

```
-build_opencloud_microservices.sh
+oc-k8s build services [branch(default:main)] [target(default:all)]
+```
+or
+```
+./oc-k8s.sh build services [branch(default:main)] [target(default:all)]
```

## Deploy the opencloud chart

```
-install_development.sh
+oc-k8s create helm [env(default:dev)]
+```
+or
+```
+./oc-k8s.sh create helm [env(default:dev)]
```

Feel free to modify or create a new opencloud/values/dev-values.yaml. The provided setup should work out of the box, but is not suitable for production usage.
@@ -74,9 +103,6 @@ Edit your /etc/hosts file, and add the following line:

Everything should be operational now: go to http://beta.opencloud.com and enjoy the ride

-# Prebuilt microservices deployment procedure
-
-TODO

# First steps

diff --git a/build_opencloud_microservices.sh b/build_opencloud_microservices.sh
deleted file mode 100755
index c8a36ac..0000000
--- a/build_opencloud_microservices.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# Get the target from the first argument or use "all" as default
-TARGET=${1:-all}
-
-find .. -mindepth 2 -maxdepth 2 -name 'Makefile' | while read -r makefile; do
-  dir=$(dirname "$makefile")
-  echo "Running 'make $TARGET' in $dir"
-  (
-    cd "$dir" && export HOST="${2:-http://beta.opencloud.com/}" && make "$TARGET"
-  )
-  if [ $? -ne 0 ]; then
-    echo "Error: make $TARGET failed in $dir"
-    exit 1
-  fi
-done
-
-echo "All make processes completed successfully."
diff --git a/clone_opencloud_microservices.sh b/clone_opencloud_microservices.sh deleted file mode 100755 index 2c3d851..0000000 --- a/clone_opencloud_microservices.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - - -REPOS=( - "oc-auth" - "oc-catalog" - "oc-datacenter" - "oc-front" - "oc-monitord" - "oc-peer" - "oc-shared" - "oc-scheduler" - "oc-schedulerd" - "oc-workflow" - "oc-workspace" -) - -# Function to clone repositories -clone_repo() { - local branch=${2:-main} - local repo_url="https://cloud.o-forge.io/core/$1.git" - local repo_name=$(basename "$repo_url" .git) - - echo "Processing repository: $repo_name" - - if [ ! -d "$1" ]; then - echo "Cloning repository: $repo_name" - git clone "$repo_url" - if [ $? -ne 0 ]; then - echo "Error cloning $repo_url" - exit 1 - fi - fi - echo "Repository '$repo_name' already exists. Pulling latest changes..." - cd "$repo_name" && git checkout $branch && git pull && cd .. -} -branch=${1:-main} -cd .. -# Iterate through each repository in the list -for repo in "${REPOS[@]}"; do - clone_repo "$repo" "$branch" -done - -echo "All repositories processed successfully." - - - diff --git a/create_kind_cluster.sh b/create_kind_cluster.sh deleted file mode 100755 index d754bc9..0000000 --- a/create_kind_cluster.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -./delete_kind_cluster.sh | true -cat < ./deployed_config diff --git a/oc-k8s.sh b/oc-k8s.sh new file mode 100755 index 0000000..21b0522 --- /dev/null +++ b/oc-k8s.sh @@ -0,0 +1,267 @@ +#!/bin/bash +REPOS=( + "oc-auth" + "oc-catalog" + "oc-datacenter" + "oc-front" + "oc-monitord" + "oc-peer" + "oc-shared" + "oc-scheduler" + "oc-schedulerd" + "oc-workflow" + "oc-workspace" +) +# values template +main_create_values() { + set -euo pipefail + + if [[ -z "${1:-}" ]]; then + echo "Error: No RELEASE PROVIDED." 
+    main_help_values
+    exit 1
+  fi
+
+  TEMPLATE_FILE=./opencloud/values.yaml.template
+  ENV_FILE=${2:-}
+  OUTPUT_FILE="./opencloud/values/$1-values.yaml"
+
+  # Load environment variables from env file
+  if [[ -f "$ENV_FILE" ]]; then
+    set -a
+    source "$ENV_FILE"
+    set +a
+  fi
+
+  # Process the template (requires gawk: the three-argument match() is a gawk extension)
+  awk '
+  {
+    line = $0
+    # match ${VAR:-default} patterns
+    while (match(line, /\$\{([A-Za-z_][A-Za-z0-9_]*):-([^}]+)\}/, arr)) {
+      varname = arr[1]
+      defaultval = arr[2]
+      # get environment value or default
+      cmd = "bash -c '\''echo ${" varname ":-" defaultval "}'\''"
+      cmd | getline value
+      close(cmd)
+      line = substr(line, 1, RSTART-1) value substr(line, RSTART+RLENGTH)
+    }
+    print line
+  }' "$TEMPLATE_FILE" > "$OUTPUT_FILE"
+
+  echo "Rendered $OUTPUT_FILE from $TEMPLATE_FILE using $ENV_FILE"
+}
+
+# HELM SERVICE
+main_create_helm() {
+  main_delete_helm "${1:-dev}" || true
+  RELEASE_NAME=${1:-dev}
+  RELEASE_NAMESPACE=${1:-dev}
+
+  helm install ${RELEASE_NAME} opencloud -n ${RELEASE_NAMESPACE} --create-namespace -f opencloud/values/${RELEASE_NAME}-values.yaml
+
+  kind get kubeconfig --name opencloud > ./deployed_config
+
+  kind export logs ./kind-logs
+}
+
+main_upgrade_helm() {
+  RELEASE_NAME=${1:-dev}
+  RELEASE_NAMESPACE=${1:-dev}
+
+  helm upgrade ${RELEASE_NAME} opencloud -n ${RELEASE_NAMESPACE} --create-namespace -f opencloud/values/${RELEASE_NAME}-values.yaml
+}
+
+main_delete_helm() {
+  RELEASE_NAME=${1:-dev}
+  RELEASE_NAMESPACE=${1:-dev}
+
+  helm uninstall ${RELEASE_NAME} -n ${RELEASE_NAMESPACE}
+
+  export KUBECONFIG=$(realpath ~/.kube/config)
+}
+# CLUSTER SERVICE
+
+build_service() {
+  local repo_url="https://cloud.o-forge.io/core/$1.git"
+  local branch=${2:-main}
+  local target=${3:-all}
+  local repo_name=$(basename "$repo_url" .git)
+
+  echo "Processing repository: $repo_name"
+
+  if [ ! -d "$1" ]; then
+    echo "Cloning repository: $repo_name"
+    git clone "$repo_url"
+    if [ $? -ne 0 ]; then
+      echo "Error cloning $repo_url"
+      exit 1
+    fi
+  fi
+  echo "Repository '$repo_name' now exists. Pulling latest changes..."
+  cd "$repo_name" && git checkout $branch && git pull
+
+  echo "Running 'make $target' in $repo_name"
+  # $2 is the branch here, not a URL: keep HOST overridable from the environment
+  export HOST="${HOST:-http://beta.opencloud.com/}" && make "$target"
+  if [ $? -ne 0 ]; then
+    echo "Error: make $target failed in $repo_name"
+    exit 1
+  fi
+  cd ..
+}
+
+main_build_services() {
+  branch=${1:-main}
+  target=${2:-all}
+  cd ..
+  # Iterate through each repository in the list
+  for repo in "${REPOS[@]}"; do
+    build_service "$repo" "$branch" "$target"
+  done
+
+  echo "All repositories processed successfully."
+}
+
+# CLUSTER CONTROLLER
+
+main_delete_cluster() {
+  kind delete cluster --name opencloud || true
+}
+
+main_create_cluster() {
+  main_delete_cluster || true
+  cat < /tmp/kind-opencloud.kubeconfig
+  # Merge the kind cluster's config into the user config.
+  KUBECONFIG=~/.kube/config:/tmp/kind-opencloud.kubeconfig kubectl config view --flatten > /tmp/kind-opencloud-merged.kubeconfig
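+  # Writing the merged view to a temp file first avoids truncating
+  # ~/.kube/config before kubectl has read it; then move it into place.
+  mv /tmp/kind-opencloud-merged.kubeconfig ~/.kube/config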
+
+  kubectl config get-contexts
+  kubectl config use-context kind-opencloud
+}
+
+main_help_values() {
+  echo "
+Values commands: oc-k8s values
+  create  - Create a new values release YAML file
+  help    - Show this help message
+
+Usage:
+  oc-k8s create values [release] [env_file (optional)]
+    release  - Release values name (required)
+    env_file - env file to map (optional)
+  oc-k8s help values
+"
+}
+
+main_help_cluster() {
+  echo "
+Cluster commands: oc-k8s cluster
+  create  - Create a new kind cluster named 'opencloud'
+  delete  - Delete the kind cluster named 'opencloud'
+  help    - Show this help message
+
+Usage:
+  oc-k8s create cluster
+  oc-k8s delete cluster
+  oc-k8s help cluster
+"
+}
+
+main_help_services() {
+  echo "
+Service commands: oc-k8s services
+  build  - Build all opencloud services
+  help   - Show this help message
+
+Usage:
+  oc-k8s build services [branch] [target]
+    branch - Git branch to build (default: main)
+    target - make target (default: all)
+  oc-k8s help services
+"
+}
+
+main_help_helm() {
+  echo "
+Helm commands: oc-k8s helm
+  create  - Install a helm release for the given environment (default: dev)
+  delete  - Uninstall a helm release for the given environment (default: dev)
+  help    - Show this help message
+
+Usage:
+  oc-k8s create helm [env]
+    env - environment to select (default: dev)
+  oc-k8s upgrade helm [env]
+    env - environment to select (default: dev)
+  oc-k8s delete helm [env]
+    env - environment to select (default: dev)
+  oc-k8s help helm
+"
+}
+
+main_help_all() {
+  echo "
+Main commands: oc-k8s
+  start - Start opencloud k8s
+  stop  - Stop opencloud k8s
+Usage:
+  oc-k8s start [args]
+  oc-k8s stop [args]
+"
+  main_help_cluster
+  main_help_services
+  main_help_helm
+  main_help_values
+}
+
+main_start() {
+  sudo /etc/init.d/apache2 stop
+  sudo nginx -s stop
+  main_create_cluster "${@:1}"
+  main_build_services "${@:1}"
+  main_create_helm "${@:1}"
+}
+
+main_stop() {
+  main_delete_helm "${@:1}" || true
+  main_delete_cluster "${@:1}" || true
+}
+if declare -f main_${1} > /dev/null; then
+  main_${1} "${@:2}"
+elif declare -f main_${1}_${2} > /dev/null; then
+  main_${1}_${2} "${@:3}"
+else
+  echo "Unknown command"
+  main_help_all
+fi
\ No newline at end of file
diff --git a/opencloud/Chart.yaml b/opencloud/Chart.yaml
index 173cd54..50cceb5 100644
--- a/opencloud/Chart.yaml
+++ b/opencloud/Chart.yaml
@@ -50,3 +50,7 @@ dependencies:
  version: 1.1.3
  repository: "https://helm.joxit.dev/"
  condition: docker-registry-ui.enabled
+- name: prometheus
+  version: "27.45.0"
+  repository: "https://prometheus-community.github.io/helm-charts"
+  condition: prometheus.enabled
diff --git a/opencloud/README.md b/opencloud/README.md
new file mode 100644
index 0000000..430a271
--- /dev/null
+++ b/opencloud/README.md
@@ -0,0 +1,129 @@
+# HOW TO CREATE YOUR OWN VALUES.YAML
+
+Use the command:
+```
+oc-k8s create values [release] [env_file (optional)]
+```
+or
+```
+./oc-k8s.sh create values [release] [env_file (optional)]
+```
+
+In an env file, set any variable you wish to override, then pass the file's path (see the example after the table below).
+
+## ENV VARIABLE
+
+| Variable | Default | Purpose / Explanation |
+| --------------- | ---------------------- | -------------------------------------------------------- |
+| `HOST` | `exemple.com` | Domain for reverse proxy rules (Traefik). |
+| `REGISTRY_HOST` | `registry.exemple.com` | Docker registry URL for reverse proxy and pull secrets. |
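+
+A minimal sketch of such an env file (the variable names come from the tables in this README; the values are only illustrative):
+
+```
+# my.env
+HOST=beta.opencloud.com
+REGISTRY_HOST=registry.beta.opencloud.com
+OC_MONGO_PWD=supersecret
+```
+
+```
+oc-k8s create values test ./my.env
+```
+
+This renders opencloud/values.yaml.template into opencloud/values/test-values.yaml, replacing each `${VAR:-default}` placeholder with the value from the env file, or with the default when the variable is unset.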
+## MONGO VARIABLE
+
+| Variable | Default | Purpose / Explanation |
+| ------------------- | ----------- | ------------------------------------ |
+| `OC_MONGO_ENABLED` | `true` | Enable/disable MongoDB deployment. |
+| `OC_MONGO_ADMIN` | `admin` | Root username for MongoDB. |
+| `OC_MONGO_PWD` | `admin` | Root password. |
+| `OC_MONGO_DATABASE` | `opencloud` | Default database to create. |
+| `OC_MONGO_SIZE` | `5000Mi` | Persistent storage size for MongoDB. |
+
+## MONGO EXPRESS VARIABLE
+
+| Variable | Default | Purpose / Explanation |
+| ------------------------- | -------------------------- | ------------------------------------- |
+| `OC_MONGOEXPRESS_ENABLED` | `true` | Enable mongo-express UI. |
+| `OC_MONGOEXPRESS_ADMIN` | `${OC_MONGO_ADMIN:-admin}` | Admin username to connect to MongoDB. |
+| `OC_MONGOEXPRESS_PWD` | `${OC_MONGO_PWD:-admin}` | Admin password. |
+
+## NATS VARIABLE
+
+| Variable | Default | Explanation |
+| ----------------- | ------- | -------------------------------------- |
+| `OC_NATS_ENABLED` | `true` | Deploy NATS cluster. |
+| `OC_NATS_SIZE` | `20Mi` | Storage size for JetStream file store. |
+
+## OpenLDAP VARIABLE
+
+| Variable | Default | Explanation |
+| --------------------------- | ---------------------------- | --------------------------------------------------------- |
+| `OC_LDAP_ENABLED` | `true` | Deploy OpenLDAP server. |
+| `OC_LDAP_IMAGE` | `osixia/openldap` | Docker image for OpenLDAP. |
+| `OC_LDAP_ORGANISATION` | `Opencloud` | LDAP organization name. |
+| `OC_LDAP_DOMAIN` | `opencloud.com` | LDAP domain. |
+| `OC_LDAP_TLS` | `false` | Enable TLS for LDAP. |
+| `OC_LDAP_ADMIN_PWD` | `admin` | LDAP admin password. |
+| `OC_LDAP_CONFIG_PWD` | `config` | Password for configuration account. |
+| `OC_LDAP_EXTERNAL` | `false` | Connect to external LDAP instead of internal deployment. |
+| `OC_LDAP_EXTERNAL_ENDPOINT` | (none) | LDAP server URL. |
+| `OC_LDAP_EXTERNAL_DN` | `cn=admin,dc=example,dc=com` | Bind DN for external LDAP. |
+| `OC_LDAP_EXTERNAL_PWD` | `admin` | Bind password for external LDAP. |
+
+## Prometheus VARIABLE
+
+| Variable | Default | Explanation |
+| ------------------------------- | ------- | ------------------------- |
+| `OC_PROMETHEUS_ENABLED` | `true` | Enable Prometheus server. |
+| `OC_PROMETHEUS_SIZE` | `5Gi` | Persistent volume size. |
+| `OC_PROMETHEUS_LIMITS_CPU` | `500m` | CPU limit. |
+| `OC_PROMETHEUS_LIMITS_MEMORY` | `512Mi` | Memory limit. |
+| `OC_PROMETHEUS_REQUESTS_CPU` | `128m` | CPU request. |
+| `OC_PROMETHEUS_REQUESTS_MEMORY` | `256Mi` | Memory request. |
+
+## Grafana VARIABLE
+
+| Variable | Default | Explanation |
+| ----------------------- | ------- | ------------------------------------------ |
+| `OC_GRAFANA_ENABLED` | `true` | Enable or disable Grafana deployment. |
+| `OC_GRAFANA_ADMIN_USER` | `admin` | Username for the Grafana admin account. |
+| `OC_GRAFANA_ADMIN_PWD` | `admin` | Password for the Grafana admin account. |
+| `OC_GRAFANA_SIZE` | `1Gi` | Size of the persistent volume for Grafana. |
+
+## Traefik VARIABLE
+
+| Variable | Default | Explanation |
+| -------------------- | ------- | ---------------------- |
+| `OC_TRAEFIK_ENABLED` | `true` | Enable Traefik server. |
+
+## Hydra VARIABLE
+
+| Variable | Default | Explanation |
+| ------------------ | ------- | ---------------------- |
+| `OC_HYDRA_ENABLED` | `true` | Deploy Hydra (OAuth2). |
+
+## Keto VARIABLE
+
+| Variable | Default | Explanation |
+| ----------------- | ------- | ---------------------------------- |
+| `OC_KETO_ENABLED` | `true` | Enable or disable Keto deployment. |
+
+## Loki VARIABLE
+
+| Variable | Default | Explanation |
+| ----------------- | ------- | ---------------------------------------- |
+| `OC_LOKI_ENABLED` | `true` | Enable or disable Loki deployment. |
+| `OC_LOKI_SIZE` | `1Gi` | Size of the persistent volume for Loki. |
+
+## MinIO VARIABLE
+
+| Variable | Default | Explanation |
+| ------------------ | ------- | ----------------------------------- |
+| `OC_MINIO_ENABLED` | `true` | Enable or disable MinIO deployment. |
+
+## Argo VARIABLE
+
+| Variable | Default | Explanation |
+| ----------------- | ------- | -------------------------------------------- |
+| `OC_ARGO_ENABLED` | `false` | Enable or disable Argo Workflows deployment. |
+
+## OC API VARIABLE
+
+Every OC microservice exposes the same set of variables; `<SERVICE>` below stands for the service name (e.g. `AUTH`, `CATALOG`, `WORKFLOW`):
+
+| Variable | Default | Explanation |
+| ---------------------------------- | ------------ | --------------------------------- |
+| `OC_<SERVICE>_ENABLED` | true | Deploy the service. |
+| `OC_<SERVICE>_IMAGE` | registry URL | Docker image. |
+| `OC_<SERVICE>_LIMITS_CPU/MEMORY` | 128m / 256Mi | Resource limits. |
+| `OC_<SERVICE>_REQUESTS_CPU/MEMORY` | 128m / 256Mi | Resource requests. |
+| `OC_<SERVICE>_REPLICAS_ENABLED` | true | Enable Horizontal Pod Autoscaler. |
+| `OC_<SERVICE>_REPLICAS_MAX` | 5 | Max replicas. |
+| `OC_<SERVICE>_REPLICAS_USAGE` | 80 | HPA target CPU usage (%). |
diff --git a/opencloud/charts/prometheus/.helmignore b/opencloud/charts/prometheus/.helmignore
new file mode 100644
index 0000000..eaeaf7a
--- /dev/null
+++ b/opencloud/charts/prometheus/.helmignore
@@ -0,0 +1,24 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +ci/ +OWNERS diff --git a/opencloud/charts/prometheus/Chart.lock b/opencloud/charts/prometheus/Chart.lock new file mode 100644 index 0000000..1bbfc92 --- /dev/null +++ b/opencloud/charts/prometheus/Chart.lock @@ -0,0 +1,15 @@ +dependencies: +- name: alertmanager + repository: https://prometheus-community.github.io/helm-charts + version: 1.28.0 +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 6.4.1 +- name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.49.1 +- name: prometheus-pushgateway + repository: https://prometheus-community.github.io/helm-charts + version: 3.4.2 +digest: sha256:fedbc59b33be92cc31268269ffcd55336a21d62e3b9ae3874e99f4ca63479991 +generated: "2025-11-04T21:27:16.796331119Z" diff --git a/opencloud/charts/prometheus/Chart.yaml b/opencloud/charts/prometheus/Chart.yaml new file mode 100644 index 0000000..deb7a74 --- /dev/null +++ b/opencloud/charts/prometheus/Chart.yaml @@ -0,0 +1,58 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts + - name: Upstream Project + url: https://github.com/prometheus/prometheus +apiVersion: v2 +appVersion: v3.7.3 +dependencies: +- condition: alertmanager.enabled + name: alertmanager + repository: https://prometheus-community.github.io/helm-charts + version: 1.28.* +- condition: kube-state-metrics.enabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 6.4.* +- condition: prometheus-node-exporter.enabled + name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.49.* +- condition: prometheus-pushgateway.enabled + name: prometheus-pushgateway + repository: https://prometheus-community.github.io/helm-charts + version: 3.4.* +description: Prometheus is a monitoring system and time series database. +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +keywords: +- monitoring +- prometheus +kubeVersion: '>=1.19.0-0' +maintainers: +- email: gianrubio@gmail.com + name: gianrubio + url: https://github.com/gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh + url: https://github.com/zanhsieh +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro + url: https://github.com/Xtigyro +- email: naseem@transit.app + name: naseemkullah + url: https://github.com/naseemkullah +- email: rootsandtrees@posteo.de + name: zeritti + url: https://github.com/zeritti +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +type: application +version: 27.45.0 diff --git a/opencloud/charts/prometheus/README.md b/opencloud/charts/prometheus/README.md new file mode 100644 index 0000000..0191771 --- /dev/null +++ b/opencloud/charts/prometheus/README.md @@ -0,0 +1,392 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. 
It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true.
+
+This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.7+
+
+## Usage
+
+The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
+
+- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/prometheus`
+- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `prometheus`
+
+The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
+
+### Install Chart
+
+Starting with version 16.0, the Prometheus chart requires Helm 3.7+ in order to install successfully. Please check your `helm` release before installation.
+
+```console
+helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+### Dependencies
+
+By default this chart installs additional, dependent charts:
+
+- [alertmanager](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager)
+- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics)
+- [prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
+- [prometheus-pushgateway](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway)
+
+To disable the dependency during installation, set `alertmanager.enabled`, `kube-state-metrics.enabled`, `prometheus-node-exporter.enabled` and `prometheus-pushgateway.enabled` to `false`.
+
+_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
+
+### Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+### Updating values.schema.json
+
+A [`values.schema.json`](https://helm.sh/docs/topics/charts/#schema-files) file has been added to validate chart values. When the `values.yaml` file has a structure change (e.g. a new field is added or a value type changes), modify the `values.schema.json` file manually or run `helm schema-gen values.yaml > values.schema.json` to ensure the schema is aligned with the latest values. Refer to [helm plugin `helm-schema-gen`](https://github.com/karuppiah7890/helm-schema-gen) for plugin installation instructions.
+
+### Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+#### To 27.0
+
+Prometheus' configuration parameter `insecure_skip_verify` in scrape configs `serverFiles."prometheus.yml".scrape_configs` has been commented out, thus keeping the default Prometheus value.
+If certificate verification must be skipped, please uncomment the line before upgrading.
+
+#### To 26.0
+
+This release changes the default version of Prometheus to v3.0.0. See the official [migration guide](https://prometheus.io/docs/prometheus/latest/migration/#prometheus-3-0-migration-guide) and [release notes](https://github.com/prometheus/prometheus/releases/tag/v3.0.0) for more details.
+
+#### To 25.0
+
+The `server.remoteRead[].url` and `server.remoteWrite[].url` fields now support templating, allowing `url` values such as `https://{{ .Release.Name }}.example.com`.
+
+Any entries in these which previously included `{{` or `}}` must be escaped with `{{ "{{" }}` and `{{ "}}" }}` respectively. Entries which did not previously include the template-like syntax will not be affected.
+
+#### To 24.0
+
+Requires Kubernetes 1.19+.
+
+Release 1.0.0 of the _alertmanager_ chart replaced [configmap-reload](https://github.com/jimmidyson/configmap-reload) with [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
+Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader. Please refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments.
+
+#### To 23.0
+
+Release 5.0.0 of the _kube-state-metrics_ chart split the `image.repository` value into two distinct values:
+
+```console
+  image:
+    registry: registry.k8s.io
+    repository: kube-state-metrics/kube-state-metrics
+```
+
+If a custom values file or CLI flags set `kube-state-metrics.image.repository`, please set the new values accordingly.
+
+If you are upgrading _prometheus-pushgateway_ with the chart and _prometheus-pushgateway_ has been deployed as a statefulset with a persistent volume, the statefulset must be deleted before upgrading the chart, e.g.:
+
+```bash
+kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway -n monitoring --cascade=orphan
+```
+
+Users are advised to review changes in the corresponding chart releases before upgrading.
+
+#### To 22.0
+
+The `app.kubernetes.io/version` label has been removed from the pod selector.
+
+Therefore, you must delete the previous StatefulSet or Deployment before upgrading. Performing this operation will cause **Prometheus to stop functioning** until the upgrade is complete.
+
+```console
+kubectl delete deploy,sts -l app.kubernetes.io/name=prometheus
+```
+
+#### To 21.0
+
+The Kubernetes labels have been updated to follow [Helm 3 label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/).
+Specifically, the label mapping is listed below:
+
+| OLD                 | NEW                          |
+|---------------------|------------------------------|
+| heritage            | app.kubernetes.io/managed-by |
+| chart               | helm.sh/chart                |
+| [container version] | app.kubernetes.io/version    |
+| app                 | app.kubernetes.io/name       |
+| release             | app.kubernetes.io/instance   |
+
+Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment needs to be deleted before upgrading.
+
+If `runAsStatefulSet: false` (this is the default):
+
+```console
+kubectl delete deploy -l app=prometheus
+```
+
+If `runAsStatefulSet: true`:
+
+```console
+kubectl delete sts -l app=prometheus
+```
+
+After that, do the actual upgrade:
+
+```console
+helm upgrade -i prometheus prometheus-community/prometheus
+```
+
+#### To 20.0
+
+The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
+Extra command-line arguments specified via configmapReload.prometheus.extraArgs are not compatible and will break with the new prometheus-config-reloader; refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments.
+
+#### To 19.0
+
+Prometheus has been updated to version v2.40.5.
+
+Prometheus-pushgateway was updated to version 2.0.0, which adopted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/).
+See the [upgrade docs of the prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway#to-200) to see what to do before you upgrade Prometheus!
+
+The condition in Chart.yaml to disable kube-state-metrics has been changed from `kubeStateMetrics.enabled` to `kube-state-metrics.enabled`.
+
+By default, the Docker image tag is taken from the `appVersion` field in Chart.yaml.
+
+Unused subchart configs have been removed, and the subchart config now sits at the bottom of the config file.
+
+If Prometheus is used as a Deployment, the update strategy has been changed to "Recreate" by default, so Helm updates work out of the box.
+
+`.Values.server.extraTemplates` & `.Values.server.extraObjects` have been removed in favour of `.Values.extraManifests`, which can do the same.
+
+`.Values.server.enabled` has been removed as it's useless now that all components are created by subcharts.
+
+All files in the `templates/server` directory have been moved to the `templates` directory.
+
+```bash
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 19.0.0
+```
+
+#### To 18.0
+
+Version 18.0.0 uses the alertmanager service from the [alertmanager chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager). If you've made some config changes, please check the old `alertmanager` and the new `alertmanager` configuration section in values.yaml for differences.
+
+Note that the `configmapReload` section for `alertmanager` was moved out of the dedicated section (`configmapReload.alertmanager`) and embedded into the alertmanager section (`alertmanager.configmapReload`).
+
+Before you update, please scale down the `prometheus-server` deployment to `0`, then perform the upgrade:
+
+```bash
+# In 17.x
+kubectl scale deploy prometheus-server --replicas=0
+# Upgrade
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 18.0.0
+```
+
+#### To 17.0
+
+Version 17.0.0 uses the pushgateway service from the [prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway). If you've made some config changes, please check the old `pushgateway` and the new `prometheus-pushgateway` configuration section in values.yaml for differences.
+
+Before you update, please scale down the `prometheus-server` deployment to `0`, then perform the upgrade:
+
+```bash
+# In 16.x
+kubectl scale deploy prometheus-server --replicas=0
+# Upgrade
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 17.0.0
+```
+
+#### To 16.0
+
+Starting with version 16.0, embedded services (alertmanager, node-exporter, etc.) are moved out of the Prometheus chart, and the respective charts from this repository are used as dependencies. Version 16.0.0 moves the node-exporter service to the [prometheus-node-exporter chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter). If you've made some config changes, please check the old `nodeExporter` and the new `prometheus-node-exporter` configuration section in values.yaml for differences.
+
+Before you update, please scale down the `prometheus-server` deployment to `0`, then perform the upgrade:
+
+```bash
+# In 15.x
+kubectl scale deploy prometheus-server --replicas=0
+# Upgrade
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 16.0.0
+```
+
+#### To 15.0
+
+Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes.
+
+Before you update, please execute the following command to be able to update kube-state-metrics:
+
+```bash
+kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
+```
+
+#### To 9.0
+
+Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server, `server.enabled` must be set to `true`.
+
+#### To 5.0
+
+As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data.
+
+Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out [the 2.x migration guide](https://prometheus.io/docs/prometheus/2.55/migration/).
+
+Users of this chart will need to update their alerting rules to the new format before they can upgrade.
+
+#### Example Migration
+
+Assume you have an existing release of the prometheus chart named `prometheus-old`. In order to update to Prometheus 2.x while keeping your old data, do the following:
+
+1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below:
+
+   ```yaml
+   alertmanager:
+     enabled: false
+   alertmanagerFiles:
+     alertmanager.yml: ""
+   kubeStateMetrics:
+     enabled: false
+   nodeExporter:
+     enabled: false
+   pushgateway:
+     enabled: false
+   server:
+     extraArgs:
+       storage.local.retention: 720h
+   serverFiles:
+     alerts: ""
+     prometheus.yml: ""
+     rules: ""
+   ```
+
+1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml, set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target.
+
+   ```yaml
+   prometheus.yml:
+     ...
+     remote_read:
+     - url: http://prometheus-old/api/v1/read
+     ...
+   ```
+
+   Old data will be available when you query the new prometheus instance.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values oci://ghcr.io/prometheus-community/charts/prometheus
+```
+
+You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configurations.
+
+### Scraping Pod Metrics via Annotations
+
+This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped, you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).
+
+In order to get prometheus to scrape pods, you must add annotations to the pods as shown below:
+
+```yaml
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /metrics
+    prometheus.io/port: "8080"
+```
+
+You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes.
+
+### Sharing Alerts Between Services
+
+Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example,
+
+```yaml
+# values.yaml
+# ...
+
+# service1-alert.yaml
+serverFiles:
+  alerts:
+    service1:
+      - alert: anAlert
+  # ...
+
+# service2-alert.yaml
+serverFiles:
+  alerts:
+    service2:
+      - alert: anAlert
+  # ...
+```
+
+```console
+helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml
+```
+
+### RBAC Configuration
+
+Role and RoleBinding resources will be created automatically for the `server` service.
+
+To manually set up RBAC, you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account.
+
+> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
+
+### ConfigMap Files
+
+AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod.
+
+Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/).
This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/opencloud/charts/prometheus/charts/alertmanager/.helmignore b/opencloud/charts/prometheus/charts/alertmanager/.helmignore new file mode 100644 index 0000000..72fc977 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ + +unittests/ +ci/ diff --git a/opencloud/charts/prometheus/charts/alertmanager/Chart.yaml b/opencloud/charts/prometheus/charts/alertmanager/Chart.yaml new file mode 100644 index 0000000..da1390d --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts +apiVersion: v2 +appVersion: v0.29.0 +description: The Alertmanager handles alerts sent by client applications such as the + Prometheus server. 
+home: https://prometheus.io/
+icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+keywords:
+- monitoring
+kubeVersion: '>=1.25.0-0'
+maintainers:
+- email: monotek23@gmail.com
+  name: monotek
+  url: https://github.com/monotek
+- email: naseem@transit.app
+  name: naseemkullah
+  url: https://github.com/naseemkullah
+name: alertmanager
+sources:
+- https://github.com/prometheus/alertmanager
+type: application
+version: 1.28.0
diff --git a/opencloud/charts/prometheus/charts/alertmanager/README.md b/opencloud/charts/prometheus/charts/alertmanager/README.md
new file mode 100644
index 0000000..12f845a
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/alertmanager/README.md
@@ -0,0 +1,62 @@
+# Alertmanager
+
+As per [prometheus.io documentation](https://prometheus.io/docs/alerting/latest/alertmanager/):
+> The Alertmanager handles alerts sent by client applications such as the
+> Prometheus server. It takes care of deduplicating, grouping, and routing them
+> to the correct receiver integration such as email, PagerDuty, or OpsGenie. It
+> also takes care of silencing and inhibition of alerts.
+
+## Prerequisites
+
+Kubernetes 1.25+ (per the chart's `kubeVersion` constraint)
+
+## Usage
+
+The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
+
+- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/alertmanager`
+- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `alertmanager`
+
+The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
+
+### Install Chart
+
+```console
+helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/alertmanager
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+### Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+### Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### To 1.0
+
+The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
+Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader; refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustments to the extra command-line arguments.
+The `networking.k8s.io/v1beta1` API is no longer supported. Use [`networking.k8s.io/v1`](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#ingressclass-v122).
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values oci://ghcr.io/prometheus-community/charts/alertmanager
+```
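+For instance (a minimal sketch; the webhook URL is a placeholder, not a real endpoint), a custom route and receiver can be provided under the `config` key of a values override:
+
+```yaml
+# my-values.yaml (hypothetical file name)
+config:
+  receivers:
+    - name: default-receiver
+      webhook_configs:
+        - url: http://example.invalid/alerts  # placeholder endpoint
+  route:
+    receiver: default-receiver
+    group_wait: 10s
+    repeat_interval: 3h
+```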
diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/NOTES.txt b/opencloud/charts/prometheus/charts/alertmanager/templates/NOTES.txt
new file mode 100644
index 0000000..46ea5be
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/alertmanager/templates/NOTES.txt
@@ -0,0 +1,21 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ include "alertmanager.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ include "alertmanager.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch its status by running 'kubectl get --namespace {{ include "alertmanager.namespace" . }} svc -w {{ include "alertmanager.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ include "alertmanager.namespace" . }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ include "alertmanager.namespace" . }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
+  kubectl --namespace {{ include "alertmanager.namespace" . }} port-forward $POD_NAME {{ .Values.service.port }}:9093
+{{- end }}
diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/_helpers.tpl b/opencloud/charts/prometheus/charts/alertmanager/templates/_helpers.tpl
new file mode 100644
index 0000000..0c34c73
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/alertmanager/templates/_helpers.tpl
@@ -0,0 +1,81 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "alertmanager.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "alertmanager.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}} +{{- define "alertmanager.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "alertmanager.labels" -}} +helm.sh/chart: {{ include "alertmanager.chart" . }} +{{ include "alertmanager.selectorLabels" . }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "alertmanager.selectorLabels" -}} +app.kubernetes.io/name: {{ include "alertmanager.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "alertmanager.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "alertmanager.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Define Ingress apiVersion +*/}} +{{- define "alertmanager.ingress.apiVersion" -}} +{{- printf "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Allow overriding alertmanager namespace +*/}} +{{- define "alertmanager.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/configmap.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/configmap.yaml new file mode 100644 index 0000000..9e5882d --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/configmap.yaml @@ -0,0 +1,21 @@ +{{- if .Values.config.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.configAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +data: + alertmanager.yml: | + {{- $config := omit .Values.config "enabled" }} + {{- toYaml $config | default "{}" | nindent 4 }} + {{- range $key, $value := .Values.templates }} + {{ $key }}: |- + {{- $value | nindent 4 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/ingress.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/ingress.yaml new file mode 100644 index 0000000..4112d64 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/ingress.yaml @@ -0,0 +1,47 @@ +{{- if .Values.ingress.enabled }} +{{- $fullName := include "alertmanager.fullname" . }} +{{- $svcPort := .Values.service.port }} +apiVersion: {{ include "alertmanager.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- if .Values.ingress.labels }} + {{- toYaml .Values.ingress.labels | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/ingressperreplica.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/ingressperreplica.yaml new file mode 100644 index 0000000..6f5a023 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/ingressperreplica.yaml @@ -0,0 +1,56 @@ +{{- if and .Values.servicePerReplica.enabled .Values.ingressPerReplica.enabled }} +{{- $pathType := .Values.ingressPerReplica.pathType }} +{{- $count := .Values.replicaCount | int -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressValues := .Values.ingressPerReplica -}} +{{- $fullName := include "alertmanager.fullname" . }} +apiVersion: v1 +kind: List +metadata: + name: {{ $fullName }}-ingressperreplica + namespace: {{ include "alertmanager.namespace" . }} +items: +{{- range $i, $e := until $count }} + - kind: Ingress + apiVersion: {{ include "alertmanager.ingress.apiVersion" $ }} + metadata: + name: {{ $fullName }}-{{ $i }} + namespace: {{ include "alertmanager.namespace" $ }} + labels: + {{- include "alertmanager.labels" $ | nindent 8 }} + {{- if $ingressValues.labels }} +{{ toYaml $ingressValues.labels | indent 8 }} + {{- end }} + {{- if $ingressValues.annotations }} + annotations: +{{ toYaml $ingressValues.annotations | indent 8 }} + {{- end }} + spec: + {{- if $ingressValues.className }} + ingressClassName: {{ $ingressValues.className }} + {{- end }} + rules: + - host: {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }} + http: + paths: + {{- range $p := $ingressValues.paths }} + - path: {{ tpl $p $ }} + pathType: {{ $pathType }} + backend: + service: + name: {{ $fullName }}-{{ $i }} + port: + name: http + {{- end -}} + {{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }} + tls: + - hosts: + - {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }} + {{- if $ingressValues.tlsSecretPerReplica.enabled }} + secretName: {{ $ingressValues.tlsSecretPerReplica.prefix }}-{{ $i }} + {{- else }} + secretName: {{ $ingressValues.tlsSecretName }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/pdb.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/pdb.yaml new file mode 100644 index 0000000..920a853 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + selector: + matchLabels: + {{- include "alertmanager.selectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml new file mode 100644 index 0000000..bc9ccaa --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "alertmanager.serviceAccountName" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/serviceperreplica.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/serviceperreplica.yaml new file mode 100644 index 0000000..8aa543b --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/serviceperreplica.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.servicePerReplica.enabled }} +{{- $count := .Values.replicaCount | int -}} +{{- $serviceValues := .Values.servicePerReplica -}} +apiVersion: v1 +kind: List +metadata: + name: {{ include "alertmanager.fullname" . }}-serviceperreplica + namespace: {{ include "alertmanager.namespace" . }} +items: +{{- range $i, $e := until $count }} + - apiVersion: v1 + kind: Service + metadata: + name: {{ include "alertmanager.fullname" $ }}-{{ $i }} + namespace: {{ include "alertmanager.namespace" $ }} + labels: + {{- include "alertmanager.labels" $ | nindent 8 }} + {{- if $serviceValues.annotations }} + annotations: +{{ toYaml $serviceValues.annotations | indent 8 }} + {{- end }} + spec: + {{- if $serviceValues.clusterIP }} + clusterIP: {{ $serviceValues.clusterIP }} + {{- end }} + {{- if $serviceValues.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := $serviceValues.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} + {{- end }} + {{- if ne $serviceValues.type "ClusterIP" }} + externalTrafficPolicy: {{ $serviceValues.externalTrafficPolicy }} + {{- end }} + ports: + - name: http + port: {{ $.Values.service.port }} + targetPort: {{ $.Values.containerPortName }} + selector: + {{- include "alertmanager.selectorLabels" $ | nindent 8 }} + statefulset.kubernetes.io/pod-name: {{ include "alertmanager.fullname" $ }}-{{ $i }} + type: "{{ $serviceValues.type }}" +{{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/services.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/services.yaml new file mode 100644 index 0000000..ebc583c --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/services.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . 
}} +spec: + {{- if .Values.service.ipDualStack.enabled }} + ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }} + ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }} + {{- end }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := . }} + - {{ $cidr }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.containerPortName }} + protocol: TCP + name: http + {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "alertmanager.selectorLabels" . | nindent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alertmanager.fullname" . }}-headless + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + clusterIP: None + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.containerPortName }} + protocol: TCP + name: http + {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }} + - port: {{ .Values.service.clusterPort }} + targetPort: clusterpeer-tcp + protocol: TCP + name: cluster-tcp + - port: {{ .Values.service.clusterPort }} + targetPort: clusterpeer-udp + protocol: UDP + name: cluster-udp + {{- end }} + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "alertmanager.selectorLabels" . | nindent 4 }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/statefulset.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/statefulset.yaml new file mode 100644 index 0000000..6c9ff82 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/statefulset.yaml @@ -0,0 +1,280 @@ +{{- $svcClusterPort := .Values.service.clusterPort }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.statefulSet.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + replicas: {{ .Values.replicaCount }} + minReadySeconds: {{ .Values.minReadySeconds }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "alertmanager.selectorLabels" . | nindent 6 }} + serviceName: {{ include "alertmanager.fullname" . }}-headless + template: + metadata: + labels: + {{- include "alertmanager.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + {{- if not .Values.configmapReload.enabled }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.hostUsers }} + hostUsers: true + {{- end }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "alertmanager.serviceAccountName" . 
}} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.schedulerName }} + schedulerName: {{ . }} + {{- end }} + {{- if or .Values.podAntiAffinity .Values.affinity }} + affinity: + {{- end }} + {{- with .Values.affinity }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if eq .Values.podAntiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ include "alertmanager.name" . }}]} + {{- else if eq .Values.podAntiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ include "alertmanager.name" . }}]} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.extraInitContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.enabled }} + - name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + {{- with .Values.configmapReload.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + {{- if and (hasKey .Values.configmapReload.extraArgs "config-file" | not) (hasKey .Values.configmapReload.extraArgs "watched-dir" | not) }} + - --watched-dir=/etc/alertmanager + {{- end }} + {{- if not (hasKey .Values.configmapReload.extraArgs "reload-url") }} + - --reload-url=http://127.0.0.1:9093/-/reload + {{- end }} + {{- range $key, $value := .Values.configmapReload.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + resources: + {{- toYaml .Values.configmapReload.resources | nindent 12 }} + {{- with .Values.configmapReload.containerPort }} + ports: + - containerPort: {{ . }} + {{- end }} + {{- with .Values.configmapReload.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.configmapReload.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.configmapReload.securityContext }} + securityContext: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config.enabled }} + - name: config + mountPath: /etc/alertmanager + {{- end }} + {{- if .Values.configmapReload.extraVolumeMounts }} + {{- toYaml .Values.configmapReload.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{- if .Values.extraEnv }} + {{- toYaml .Values.extraEnv | nindent 12 }} + {{- end }} + {{- with .Values.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - --storage.path=/alertmanager + {{- if not (hasKey .Values.extraArgs "config.file") }} + - --config.file=/etc/alertmanager/alertmanager.yml + {{- end }} + {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }} + - --cluster.advertise-address=[$(POD_IP)]:{{ $svcClusterPort }} + - --cluster.listen-address=0.0.0.0:{{ $svcClusterPort }} + {{- end }} + {{- if gt (int .Values.replicaCount) 1}} + {{- $fullName := include "alertmanager.fullname" . }} + {{- range $i := until (int .Values.replicaCount) }} + - --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:{{ $svcClusterPort }} + {{- end }} + {{- end }} + {{- if .Values.additionalPeers }} + {{- range $item := .Values.additionalPeers }} + - --cluster.peer={{ $item }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.baseURL }} + - --web.external-url={{ .Values.baseURL }} + {{- end }} + ports: + - name: {{ .Values.containerPortName }} + containerPort: 9093 + protocol: TCP + {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }} + - name: clusterpeer-tcp + containerPort: {{ $svcClusterPort }} + protocol: TCP + - name: clusterpeer-udp + containerPort: {{ $svcClusterPort }} + protocol: UDP + {{- end }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + {{- if .Values.config.enabled }} + - name: config + mountPath: /etc/alertmanager + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: /alertmanager + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.config.enabled }} + - name: config + configMap: + name: {{ include "alertmanager.fullname" . }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.extraPodConfigs }} + {{- toYaml .Values.extraPodConfigs | nindent 6 }} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- with .Values.persistence.annotations }} + annotations: + {{ toYaml . 
| nindent 14 }} + {{- end }} + {{- with .Values.persistence.labels }} + labels: + {{ toYaml . | nindent 14 }} + {{- end }} + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 10 }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + {{- end }} + {{- else }} + - name: storage + {{- with .Values.persistence.emptyDir }} + emptyDir: + {{- toYaml . | nindent 12 }} + {{- else }} + emptyDir: {} + {{- end -}} + {{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/tests/test-connection.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/tests/test-connection.yaml new file mode 100644 index 0000000..410eba5 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/tests/test-connection.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "alertmanager.fullname" . }}-test-connection" + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.testFramework.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "alertmanager.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/templates/vpa.yaml b/opencloud/charts/prometheus/charts/alertmanager/templates/vpa.yaml new file mode 100644 index 0000000..53f70a2 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/templates/vpa.yaml @@ -0,0 +1,26 @@ +{{- if .Values.verticalPodAutoscaler.enabled }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ include "alertmanager.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + {{- if .Values.verticalPodAutoscaler.recommenders }} + recommenders: + {{- range .Values.verticalPodAutoscaler.recommenders }} + - name: {{ .name }} + {{- end }} + {{- end }} + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "alertmanager.fullname" . 
}} + {{- if .Values.verticalPodAutoscaler.updatePolicy }} + updatePolicy: + {{- toYaml .Values.verticalPodAutoscaler.updatePolicy | nindent 4 }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.resourcePolicy }} + resourcePolicy: + {{- toYaml .Values.verticalPodAutoscaler.resourcePolicy | nindent 4 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/alertmanager/values.schema.json b/opencloud/charts/prometheus/charts/alertmanager/values.schema.json new file mode 100644 index 0000000..a22ecbb --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/values.schema.json @@ -0,0 +1,972 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "title": "alertmanager", + "description": "The Alertmanager handles alerts sent by client applications such as the Prometheus server.", + "type": "object", + "required": [ + "replicaCount", + "image", + "serviceAccount", + "service", + "persistence", + "config" + ], + "definitions": { + "image": { + "description": "Container image parameters.", + "type": "object", + "required": ["repository"], + "additionalProperties": false, + "properties": { + "repository": { + "description": "Image repository. Path to the image with registry(quay.io) or without(prometheus/alertmanager) for docker.io.", + "type": "string" + }, + "pullPolicy": { + "description": "Image pull policy. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.", + "type": "string", + "enum": [ + "Never", + "IfNotPresent", + "Always" + ], + "default": "IfNotPresent" + }, + "tag": { + "description": "Use chart appVersion by default.", + "type": "string", + "default": "" + } + } + }, + "resources": { + "description": "Resource limits and requests for the Container.", + "type": "object", + "properties": { + "limits": { + "description": "Resource limits for the Container.", + "type": "object", + "properties": { + "cpu": { + "description": "CPU request for the Container.", + "type": "string" + }, + "memory": { + "description": "Memory request for the Container.", + "type": "string" + } + } + }, + "requests": { + "description": "Resource requests for the Container.", + "type": "object", + "properties": { + "cpu": { + "description": "CPU request for the Container.", + "type": "string" + }, + "memory": { + "description": "Memory request for the Container.", + "type": "string" + } + } + } + } + }, + "securityContext": { + "description": "Security context for the container.", + "type": "object", + "properties": { + "capabilities": { + "description": "Specifies the capabilities to be dropped by the container.", + "type": "object", + "properties": { + "drop": { + "description": "List of capabilities to be dropped.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "readOnlyRootFilesystem": { + "description": "Specifies whether the root file system should be mounted as read-only.", + "type": "boolean" + }, + "runAsUser": { + "description": "Specifies the UID (User ID) to run the container as.", + "type": "integer" + }, + "runAsNonRoot": { + "description": "Specifies whether to run the container as a non-root user.", + "type": "boolean" + }, + "runAsGroup": { + "description": "Specifies the GID (Group ID) to run the container as.", + "type": "integer" + } + } + }, + "volumeMounts": { + "description": "List of volume mounts for the Container.", + "type": "array", + "items": { + "description": "Volume mounts for the Container.", + "type": "object", + "required": ["name", "mountPath"], + "properties": { + "name": 
{ + "description": "The name of the volume to mount.", + "type": "string" + }, + "mountPath": { + "description": "The mount path for the volume.", + "type": "string" + }, + "readOnly": { + "description": "Specifies if the volume should be mounted in read-only mode.", + "type": "boolean" + } + } + } + }, + "env": { + "description": "List of environment variables for the Container.", + "type": "array", + "items": { + "description": "Environment variables for the Container.", + "type": "object", + "required": ["name"], + "properties": { + "name": { + "description": "The name of the environment variable.", + "type": "string" + }, + "value": { + "description": "The value of the environment variable.", + "type": "string" + } + } + } + }, + "config": { + "description": "https://prometheus.io/docs/alerting/latest/configuration/", + "duration": { + "type": "string", + "pattern": "^((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0)$" + }, + "labelname": { + "type": "string", + "pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$|^...$" + }, + "route": { + "description": "Alert routing configuration.", + "type": "object", + "properties": { + "receiver": { + "description": "The default receiver to send alerts to.", + "type": "string" + }, + "group_by": { + "description": "The labels by which incoming alerts are grouped together.", + "type": "array", + "items": { + "type": "string", + "$ref": "#/definitions/config/labelname" + } + }, + "continue": { + "description": "Whether an alert should continue matching subsequent sibling nodes.", + "type": "boolean", + "default": false + }, + "matchers": { + "description": "A list of matchers that an alert has to fulfill to match the node.", + "type": "array", + "items": { + "type": "string" + } + }, + "group_wait": { + "description": "How long to initially wait to send a notification for a group of alerts.", + "$ref": "#/definitions/config/duration" + }, + "group_interval": { + "description": "How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent.", + "$ref": "#/definitions/config/duration" + }, + "repeat_interval": { + "description": "How long to wait before sending a notification again if it has already been sent successfully for an alert.", + "$ref": "#/definitions/config/duration" + }, + "mute_time_intervals": { + "description": "Times when the route should be muted.", + "type": "array", + "items": { + "type": "string" + } + }, + "active_time_intervals": { + "description": "Times when the route should be active.", + "type": "array", + "items": { + "type": "string" + } + }, + "routes": { + "description": "Zero or more child routes.", + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/config/route" + } + } + } + } + } + }, + "properties": { + "replicaCount": { + "description": "Number of desired pods.", + "type": "integer", + "default": 1, + "minimum": 0 + }, + "image": { + "description": "Container image parameters.", + "$ref": "#/definitions/image" + }, + "baseURL": { + "description": "External URL where alertmanager is reachable.", + "type": "string", + "default": "", + "examples": [ + "https://alertmanager.example.com" + ] + }, + "extraArgs": { + "description": "Additional alertmanager container arguments. 
Use args without '--', only 'key: value' syntax.", + "type": "object", + "default": {} + }, + "extraSecretMounts": { + "description": "Additional Alertmanager Secret mounts.", + "type": "array", + "default": [], + "items": { + "type": "object", + "required": ["name", "mountPath", "secretName"], + "properties": { + "name": { + "type": "string" + }, + "mountPath": { + "type": "string" + }, + "subPath": { + "type": "string", + "default": "" + }, + "secretName": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + } + } + } + }, + "imagePullSecrets": { + "description": "The property allows you to configure multiple image pull secrets.", + "type": "array", + "default": [], + "items": { + "type": "object", + "required": ["name"], + "properties": { + "name": { + "description": "Specifies the Secret name of the image pull secret.", + "type": "string" + } + } + } + }, + "nameOverride": { + "description": "Override value for the name of the Helm chart.", + "type": "string", + "default": "" + }, + "fullnameOverride": { + "description": "Override value for the fully qualified app name.", + "type": "string", + "default": "" + }, + "namespaceOverride": { + "description": "Override deployment namespace.", + "type": "string", + "default": "" + }, + "automountServiceAccountToken": { + "description": "Specifies whether to automatically mount the ServiceAccount token into the Pod's filesystem.", + "type": "boolean", + "default": true + }, + "hostUsers": { + "description": "Running within a user namespace, where the user IDs inside the container are mapped to different, usually unprivileged, user IDs on the host system.", + "type": "boolean", + "default": false + }, + "serviceAccount": { + "description": "Contains properties related to the service account configuration.", + "type": "object", + "required": ["create"], + "properties": { + "create": { + "description": "Specifies whether a service account should be created.", + "type": "boolean", + "default": true + }, + "annotations": { + "description": "Annotations to add to the service account.", + "type": "object", + "default": {} + }, + "name": { + "description": "The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template.",
+          "type": "string",
+          "default": ""
+        }
+      }
+    },
+    "schedulerName": {
+      "description": "Sets the schedulerName in the alertmanager pod.",
+      "type": "string",
+      "default": ""
+    },
+    "priorityClassName": {
+      "description": "Sets the priorityClassName in the alertmanager pod.",
+      "type": "string",
+      "default": ""
+    },
+    "podSecurityContext": {
+      "description": "Pod security context configuration.",
+      "type": "object",
+      "properties": {
+        "fsGroup": {
+          "description": "The fsGroup value for the pod's security context.",
+          "type": "integer",
+          "default": 65534
+        },
+        "runAsUser": {
+          "description": "The UID to run the pod's containers as.",
+          "type": "integer"
+        },
+        "runAsGroup": {
+          "description": "The GID to run the pod's containers as.",
+          "type": "integer"
+        }
+      }
+    },
+    "dnsConfig": {
+      "description": "DNS configuration for the pod.",
+      "type": "object",
+      "properties": {
+        "nameservers": {
+          "description": "List of DNS server IP addresses.",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "searches": {
+          "description": "List of DNS search domains.",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "options": {
+          "description": "List of DNS options.",
+          "type": "array",
+          "items": {
+            "description": "DNS options.",
+            "type": "object",
+            "required": ["name"],
+            "properties": {
+              "name": {
+                "description": "The name of the DNS option.",
+                "type": "string"
+              },
+              "value": {
+                "description": "The value of the DNS option.",
+                "type": "string"
+              }
+            }
+          }
+        }
+      }
+    },
+    "hostAliases": {
+      "description": "List of host aliases.",
+      "type": "array",
+      "items": {
+        "description": "Host aliases configuration.",
+        "type": "object",
+        "required": ["ip", "hostnames"],
+        "properties": {
+          "ip": {
+            "description": "IP address associated with the host alias.",
+            "type": "string"
+          },
+          "hostnames": {
+            "description": "List of hostnames associated with the IP address.",
+            "type": "array",
+            "items": {
+              "type": "string"
+            }
+          }
+        }
+      }
+    },
+    "securityContext": {
+      "description": "Security context for the container.",
+      "$ref": "#/definitions/securityContext"
+    },
+    "additionalPeers": {
+      "description": "Additional peers for an Alertmanager cluster.",
+      "type": "array",
+      "items": {
+        "type": "string"
+      }
+    },
+    "extraInitContainers": {
+      "description": "Additional InitContainers to initialize the pod.",
+      "type": "array",
+      "default": [],
+      "items": {
+        "required": ["name", "image"],
+        "properties": {
+          "name": {
+            "description": "The name of the InitContainer.",
+            "type": "string"
+          },
+          "image": {
+            "description": "The container image to use for the InitContainer.",
+            "type": "string"
+          },
+          "pullPolicy": {
+            "description": "Image pull policy. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
Cannot be updated.", + "type": "string", + "enum": [ + "Never", + "IfNotPresent", + "Always" + ], + "default": "IfNotPresent" + }, + "command": { + "description": "The command to run in the InitContainer.", + "type": "array", + "items": { + "type": "string" + } + }, + "args": { + "description": "Additional command arguments for the InitContainer.", + "type": "array", + "items": { + "type": "string" + } + }, + "ports": { + "description": "List of ports to expose from the container.", + "type": "array", + "items": { + "type": "object" + } + }, + "env": { + "description": "List of environment variables for the InitContainer.", + "$ref": "#/definitions/env" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container.", + "type": "array", + "items": { + "type": "object" + } + }, + "volumeMounts": { + "description": "List of volume mounts for the InitContainer.", + "$ref": "#/definitions/volumeMounts" + }, + "resources": { + "description": "Resource requirements for the InitContainer.", + "$ref": "#/definitions/resources" + }, + "securityContext": { + "$ref": "#/definitions/securityContext", + "description": "The security context for the InitContainer." + } + } + } + }, + "extraContainers": { + "description": "Additional containers to add to the stateful set.", + "type": "array", + "default": [], + "items": { + "required": ["name", "image"], + "properties": { + "name": { + "description": "The name of the InitContainer.", + "type": "string" + }, + "image": { + "description": "The container image to use for the InitContainer.", + "type": "string" + }, + "pullPolicy": { + "description": "Image pull policy. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.", + "type": "string", + "enum": [ + "Never", + "IfNotPresent", + "Always" + ], + "default": "IfNotPresent" + }, + "command": { + "description": "The command to run in the InitContainer.", + "type": "array", + "items": { + "type": "string" + } + }, + "args": { + "description": "Additional command arguments for the InitContainer.", + "type": "array", + "items": { + "type": "string" + } + }, + "ports": { + "description": "List of ports to expose from the container.", + "type": "array", + "items": { + "type": "object" + } + }, + "env": { + "description": "List of environment variables for the InitContainer.", + "$ref": "#/definitions/env" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container.", + "type": "array", + "items": { + "type": "object" + } + }, + "volumeMounts": { + "description": "List of volume mounts for the InitContainer.", + "$ref": "#/definitions/volumeMounts" + }, + "resources": { + "description": "Resource requirements for the InitContainer.", + "$ref": "#/definitions/resources" + }, + "securityContext": { + "$ref": "#/definitions/securityContext", + "description": "The security context for the InitContainer." 
+ } + } + } + }, + "resources": { + "description": "Resource limits and requests for the pod.", + "$ref": "#/definitions/resources" + }, + "containerPortName": { + "description": "Name of the port for the main container.", + "type": "string", + "default": "http" + }, + "livenessProbe": { + "description": "Liveness probe configuration.", + "type": "object" + }, + "readinessProbe": { + "description": "Readiness probe configuration.", + "type": "object" + }, + "service": { + "description": "Service configuration.", + "type": "object", + "required": ["type", "port"], + "properties": { + "annotations": { + "description": "Annotations to add to the service.", + "type": "object" + }, + "type": { + "description": "Service type.", + "type": "string" + }, + "port": { + "description": "Port number for the service.", + "type": "integer" + }, + "clusterPort": { + "description": "Port number for the cluster.", + "type": "integer" + }, + "loadBalancerIP": { + "description": "External IP to assign when the service type is LoadBalancer.", + "type": "string" + }, + "loadBalancerSourceRanges": { + "description": "IP ranges to allow access to the loadBalancerIP.", + "type": "array", + "items": { + "type": "string" + } + }, + "nodePort": { + "description": "Specific nodePort to force when service type is NodePort.", + "type": "integer" + } + } + }, + "ingress": { + "description": "Ingress configuration.", + "type": "object", + "properties": { + "enabled": { + "description": "Indicates if Ingress is enabled.", + "type": "boolean" + }, + "className": { + "description": "Ingress class name.", + "type": "string" + }, + "annotations": { + "description": "Annotations to add to the Ingress.", + "type": "object" + }, + "hosts": { + "description": "Host and path configuration for the Ingress.", + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "description": "Host name for the Ingress.", + "type": "string" + }, + "paths": { + "description": "Path configuration for the Ingress.", + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "description": "Path for the Ingress.", + "type": "string" + }, + "pathType": { + "description": "Path type for the Ingress.", + "type": "string" + } + } + } + } + } + } + }, + "tls": { + "description": "TLS configuration for the Ingress.", + "type": "array", + "items": { + "type": "object", + "properties": { + "secretName": { + "description": "Name of the secret for TLS.", + "type": "string" + }, + "hosts": { + "description": "Host names for the TLS configuration.", + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "nodeSelector": { + "description": "Node selector for pod assignment.", + "type": "object" + }, + "tolerations": { + "description": "Tolerations for pod assignment.", + "type": "array" + }, + "affinity": { + "description": "Affinity rules for pod assignment.", + "type": "object" + }, + "podAntiAffinity": { + "description": "Pod anti-affinity configuration.", + "type": "string", + "enum": ["", "soft", "hard"], + "default": "" + }, + "podAntiAffinityTopologyKey": { + "description": "Topology key to use for pod anti-affinity.", + "type": "string" + }, + "topologySpreadConstraints": { + "description": "Topology spread constraints for pod assignment.", + "type": "array", + "items": { + "type": "object", + "required": ["maxSkew", "topologyKey", "whenUnsatisfiable", "labelSelector"], + "properties": { + "maxSkew": { + "type": "integer" + }, + "topologyKey": { + "type": "string" + }, + 
"whenUnsatisfiable": { + "type": "string", + "enum": ["DoNotSchedule", "ScheduleAnyway"] + }, + "labelSelector": { + "type": "object", + "required": ["matchLabels"], + "properties": { + "matchLabels": { + "type": "object" + } + } + } + } + } + }, + "statefulSet": { + "description": "StatefulSet configuration for managing pods.", + "type": "object", + "properties": { + "annotations": { + "type": "object" + } + } + }, + "podAnnotations": { + "description": "Annotations to add to the pods.", + "type": "object" + }, + "podLabels": { + "description": "Labels to add to the pods.", + "type": "object" + }, + "podDisruptionBudget": { + "description": "Pod disruption budget configuration.", + "type": "object", + "properties": { + "maxUnavailable": { + "type": "integer" + }, + "minAvailable": { + "type": "integer" + } + } + }, + "command": { + "description": "The command to be executed in the container.", + "type": "array", + "items": { + "type": "string" + } + }, + "persistence": { + "description": "Persistence configuration for storing data.", + "type": "object", + "required": ["enabled", "size"], + "properties": { + "enabled": { + "type": "boolean" + }, + "storageClass": { + "type": "string" + }, + "accessModes": { + "type": "array", + "items": { + "type": "string" + } + }, + "size": { + "type": "string" + }, + "annotations": { + "type": "object", + "description": "Custom annotations to apply to the PersistentVolumeClaim created by the Alertmanager StatefulSet.", + "additionalProperties": { + "type": "string" + }, + "default": {} + }, + "labels": { + "type": "object", + "description": "Custom labels to apply to the PersistentVolumeClaim created by the Alertmanager StatefulSet.", + "additionalProperties": { + "type": "string" + }, + "default": {} + } + } + }, + "configAnnotations": { + "description": "Annotations to be added to the Alertmanager configuration.", + "type": "object" + }, + "config": { + "description": "Alertmanager configuration.", + "type": "object", + "properties": { + "enabled": { + "description": "Whether to create alermanager configmap or not.", + "type": "boolean" + }, + "global": { + "description": "Global configuration options.", + "type": "object" + }, + "templates": { + "description": "Alertmanager template files.", + "type": "array", + "items": { + "type": "string" + } + }, + "receivers": { + "description": "Alert receivers configuration.", + "type": "array", + "items": { + "type": "object", + "required": ["name"], + "properties": { + "name": { + "description": "The unique name of the receiver.", + "type": "string" + } + } + } + }, + "route": { + "description": "Alert routing configuration.", + "type": "object", + "$ref": "#/definitions/config/route" + } + } + }, + "configmapReload": { + "description": "Monitors ConfigMap changes and POSTs to a URL.", + "type": "object", + "properties": { + "enabled": { + "description": "Specifies whether the configmap-reload container should be deployed.", + "type": "boolean", + "default": false + }, + "name": { + "description": "The name of the configmap-reload container.", + "type": "string" + }, + "image": { + "description": "The container image for the configmap-reload container.", + "$ref": "#/definitions/image" + }, + "containerPort": { + "description": "Port number for the configmap-reload container.", + "type": "integer" + }, + "resources": { + "description": "Resource requests and limits for the configmap-reload container.", + "$ref": "#/definitions/resources" + } + } + }, + "templates": { + "description": "Custom templates used 
by Alertmanager.", + "type": "object" + }, + "extraVolumeMounts": { + "description": "List of volume mounts for the Container.", + "$ref": "#/definitions/volumeMounts" + }, + "extraVolumes": { + "description": "Additional volumes to be mounted in the Alertmanager pod.", + "type": "array", + "default": [], + "items": { + "type": "object", + "required": ["name"], + "properties": { + "name": { + "type": "string" + } + } + } + }, + "extraEnv": { + "description": "List of environment variables for the Container.", + "$ref": "#/definitions/env" + }, + "testFramework": { + "description": "Configuration for the test Pod.", + "type": "object", + "properties": { + "enabled": { + "description": "Specifies whether the test Pod is enabled.", + "type": "boolean", + "default": false + }, + "annotations": { + "description": "Annotations to be added to the test Pod.", + "type": "object" + } + } + }, + "verticalPodAutoscaler": { + "description": "Vertical Pod Autoscaling configuration.", + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "recommenders": { + "type": "array" + }, + "updatePolicy": { + "type": "object" + }, + "resourcePolicy": { + "type": "object" + } + } + }, + "extraPodConfigs": { + "description": "Object to allow users to add additional Pod configuration like dnsPolicy or hostNetwork", + "type": "object" + } + } +} diff --git a/opencloud/charts/prometheus/charts/alertmanager/values.yaml b/opencloud/charts/prometheus/charts/alertmanager/values.yaml new file mode 100644 index 0000000..e4fae62 --- /dev/null +++ b/opencloud/charts/prometheus/charts/alertmanager/values.yaml @@ -0,0 +1,440 @@ +# yaml-language-server: $schema=values.schema.json +# Default values for alertmanager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +# Number of old history to retain to allow rollback +# Default Kubernetes value is set to 10 +revisionHistoryLimit: 10 + +image: + repository: quay.io/prometheus/alertmanager + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +# Full external URL where alertmanager is reachable, used for backlinks. +baseURL: "" + +extraArgs: {} + +## Additional Alertmanager Secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +## namespaceOverride overrides the namespace which the resources will be deployed in +namespaceOverride: "" + +automountServiceAccountToken: true + +## Running within a user namespace. +# Kubernetes server must be at or later than version v1.25. +# Kubernetes v1.25 through to v1.27 recognise UserNamespacesStatelessPodsSupport. +# Kubernetes v1.28 through to v1.32 need to enable the UserNamespacesSupport feature gate. +hostUsers: false + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+# Sets priorityClassName in alertmanager pod
+priorityClassName: ""
+
+# Sets schedulerName in alertmanager pod
+schedulerName: ""
+
+podSecurityContext:
+  fsGroup: 65534
+dnsConfig: {}
+  # nameservers:
+  #   - 1.2.3.4
+  # searches:
+  #   - ns1.svc.cluster-domain.example
+  #   - my.dns.search.suffix
+  # options:
+  #   - name: ndots
+  #     value: "2"
+  #   - name: edns0
+hostAliases: []
+  # - ip: "127.0.0.1"
+  #   hostnames:
+  #     - "foo.local"
+  #     - "bar.local"
+  # - ip: "10.1.2.3"
+  #   hostnames:
+  #     - "foo.remote"
+  #     - "bar.remote"
+securityContext:
+  # capabilities:
+  #   drop:
+  #     - ALL
+  # readOnlyRootFilesystem: true
+  runAsUser: 65534
+  runAsNonRoot: true
+  runAsGroup: 65534
+
+additionalPeers: []
+
+## Additional InitContainers to initialize the pod
+##
+extraInitContainers: []
+
+## Additional containers to add to the stateful set. This allows setting up sidecar containers, e.g. a proxy to integrate
+## alertmanager with an external tool, such as Teams, that has no direct integration.
+##
+extraContainers: []
+
+containerPortName: &containerPortName http
+
+livenessProbe:
+  httpGet:
+    path: /
+    port: *containerPortName
+
+readinessProbe:
+  httpGet:
+    path: /
+    port: *containerPortName
+
+service:
+  annotations: {}
+  labels: {}
+  type: ClusterIP
+  port: 9093
+  clusterPort: 9094
+  loadBalancerIP: ""  # Assign ext IP when Service type is LoadBalancer
+  loadBalancerSourceRanges: []  # Only allow access to loadBalancerIP from these IPs
+  # if you want to force a specific nodePort. Must be used with service.type=NodePort
+  # nodePort:
+
+  # Optionally specify extra list of additional ports exposed on both services
+  extraPorts: []
+
+  # ip dual stack
+  ipDualStack:
+    enabled: false
+    ipFamilies: ["IPv6", "IPv4"]
+    ipFamilyPolicy: "PreferDualStack"
+
+# Configuration for creating a separate Service for each statefulset Alertmanager replica
+#
+servicePerReplica:
+  enabled: false
+  annotations: {}
+
+  # Loadbalancer source IP ranges
+  # Only used if servicePerReplica.type is "LoadBalancer"
+  loadBalancerSourceRanges: []
+
+  # Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+  #
+  externalTrafficPolicy: Cluster
+
+  # Service type
+  #
+  type: ClusterIP
+
+ingress:
+  enabled: false
+  className: ""
+  labels: {}
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: alertmanager.domain.com
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - alertmanager.domain.com
+
+# Configuration for creating an Ingress that will map to each Alertmanager replica service
+# alertmanager.servicePerReplica must be enabled
+#
+ingressPerReplica:
+  enabled: false
+
+  # className for the ingresses
+  #
+  className: ""
+
+  annotations: {}
+  labels: {}
+
+  # Final form of the hostname for each per replica ingress is
+  # {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
+  #
+  # Prefix for the per replica ingress that will have `-$replicaNumber`
+  # appended to the end
+  hostPrefix: "alertmanager"
+  # Domain that will be used for the per replica ingress
+  hostDomain: "domain.com"
+
+  # Paths to use for ingress rules
+  #
+  paths:
+    - /
+
+  # PathType for ingress rules
+  #
+  pathType: ImplementationSpecific
+
+  # Secret name containing the TLS certificate for alertmanager per replica ingress
+  # Secret must be manually created in the
namespace + tlsSecretName: "" + + # Separate secret for each per-replica Ingress. Can be used together with cert-manager + # + tlsSecretPerReplica: + enabled: false + # Final form of the secret for each per replica ingress is + # {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + # + prefix: "alertmanager" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 32Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node. +## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. +## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. +## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. +## +podAntiAffinity: "" + +## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity. +## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone +## +podAntiAffinityTopologyKey: kubernetes.io/hostname + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: alertmanager + +statefulSet: + annotations: {} + +## Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing for it to +## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). +## This is an alpha field from kubernetes 1.22 until 1.24 which requires enabling the StatefulSetMinReadySeconds +## feature gate. +## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#minimum-ready-seconds +minReadySeconds: 0 + +podAnnotations: {} +podLabels: {} + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 + +command: [] + +persistence: + ## If true, storage will create or use a Persistent Volume + ## If false, storage will use emptyDir + ## + enabled: true + + ## Custom annotations for the PVC created by the alertmanager StatefulSet. + ## Useful for configuring storage provider options such as disk type, KMS encryption keys, or custom volume name prefixes. + annotations: {} + + ## Custom labels for the PVC created by the alertmanager StatefulSet. + ## Useful for selecting, grouping, and organizing so that they can be queried or targeted in deployments, policies, etc. + labels: {} + + ## Persistent Volume Storage Class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner.
+ ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 50Mi + + ## Configure emptyDir volume + ## + emptyDir: {} + +configAnnotations: {} + ## For example, if you want to provide private data from a secret vault + ## https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-secrets-webhook + ## P.s.: Add option `configMapMutation: true` for vault-secrets-webhook + # vault.security.banzaicloud.io/vault-role: "admin" + # vault.security.banzaicloud.io/vault-addr: "https://vault.vault.svc.cluster.local:8200" + # vault.security.banzaicloud.io/vault-skip-verify: "true" + # vault.security.banzaicloud.io/vault-path: "kubernetes" + ## Example of injecting a secret + # slack_api_url: '${vault:secret/data/slack-hook-alerts#URL}' + +config: + enabled: true + global: {} + # slack_api_url: '' + + templates: + - '/etc/alertmanager/*.tmpl' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader +## +configmapReload: + ## If false, the configmap-reload container will not be deployed + ## + enabled: false + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.86.1 + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + + livenessProbe: {} + # httpGet: + # path: /healthz + # port: 8080 + # scheme: HTTP + readinessProbe: {} + # httpGet: + # path: /healthz + # port: 8080 + # scheme: HTTP + + extraArgs: {} + + ## Optionally specify extra list of additional volumeMounts + extraVolumeMounts: [] + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + + ## Optionally specify extra environment variables to add to alertmanager container + extraEnv: [] + # - name: FOO + # value: BAR + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsUser: 65534 + # runAsNonRoot: true + # runAsGroup: 65534 + +templates: {} +# alertmanager.tmpl: |- + +## Optionally specify extra list of additional volumeMounts +extraVolumeMounts: [] + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + +## Optionally specify extra list of additional volumes +extraVolumes: [] + # - name: extras + # emptyDir: {} + +## Optionally specify extra environment variables to add to alertmanager container +extraEnv: [] + # - name: FOO + # value: BAR + +testFramework: + enabled: false + annotations: + "helm.sh/hook": test-success + # "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + +# --- Vertical Pod Autoscaler +verticalPodAutoscaler: + # -- Use VPA for alertmanager + enabled: false + # recommenders: + # - name: 'alternative' + # updatePolicy: + # updateMode: "Auto" + # minReplicas: 1 + # resourcePolicy: + # containerPolicies: + # - containerName: '*' + # minAllowed: + # cpu: 100m + # memory: 128Mi + # maxAllowed: + # cpu: 1 + # memory: 500Mi + # controlledResources: ["cpu", "memory"] + +# --- Extra Pod Configs +extraPodConfigs: {} + # dnsPolicy: ClusterFirstWithHostNet + # hostNetwork: true diff --git
a/opencloud/charts/prometheus/charts/kube-state-metrics/.helmignore b/opencloud/charts/prometheus/charts/kube-state-metrics/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/Chart.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/Chart.yaml new file mode 100644 index 0000000..6f39b66 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts +apiVersion: v2 +appVersion: 2.17.0 +description: Install kube-state-metrics to generate and expose cluster-level metrics +home: https://github.com/kubernetes/kube-state-metrics/ +keywords: +- metric +- monitoring +- prometheus +- kubernetes +maintainers: +- email: tariq.ibrahim@mulesoft.com + name: tariq1890 + url: https://github.com/tariq1890 +- email: manuel@rueg.eu + name: mrueg + url: https://github.com/mrueg +- email: david@0xdc.me + name: dotdc + url: https://github.com/dotdc +name: kube-state-metrics +sources: +- https://github.com/kubernetes/kube-state-metrics/ +type: application +version: 6.4.1 diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/README.md b/opencloud/charts/prometheus/charts/kube-state-metrics/README.md new file mode 100644 index 0000000..9348d4c --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/README.md @@ -0,0 +1,87 @@ +# kube-state-metrics Helm Chart + +Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). + +## Usage + +The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/). + +- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/kube-state-metrics` +- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `kube-state-metrics` + +The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository. + +### Install Chart + +```console +helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/kube-state-metrics [flags] +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +### Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release.
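+ +For example, to remove a hypothetical release named `ksm` from the `monitoring` namespace (the `--namespace` flag must match the namespace the release was installed into): + +```console +helm uninstall ksm --namespace monitoring +```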
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +### Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/kube-state-metrics [flags] +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +#### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics + +You can upgrade in-place: + +1. [Upgrade](#upgrading-chart) your existing release using the new chart repository + +## Upgrading to v6.0.0 + +This version drops support for deprecated Pod Security Policy resources. + +## Upgrading to v3.0.0 + +v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application side. + +The upgraded chart now includes the following changes: + +- Dropped support for helm v2 (helm v3 or later is required) +- The `collectors` key was renamed to `resources` +- The `namespace` key was renamed to `namespaces` + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: + +```console +helm show values oci://ghcr.io/prometheus-community/charts/kube-state-metrics +``` + +### kube-rbac-proxy + +You can enable `kube-state-metrics` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy one RBAC proxy container per endpoint (metrics & telemetry). +To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-state-metrics-read +rules: + - apiGroups: [ "" ] + resources: ["services/kube-state-metrics"] + verbs: + - get +``` + +See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details. diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt new file mode 100644 index 0000000..3589c24 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt @@ -0,0 +1,23 @@ +kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. +The exposed metrics can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics + +The metrics are exported on the HTTP endpoint /metrics on the listening port. +In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics + +They are served either as plaintext or protobuf depending on the Accept header. +They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. + +{{- if .Values.kubeRBACProxy.enabled}} + +kube-rbac-proxy endpoint protection is enabled: +- Metrics endpoints are now HTTPS +- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions: +``` +rules: + - apiGroups: [ "" ] + resources: ["services/{{ template "kube-state-metrics.fullname" .
}}"] + verbs: + - get +``` +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl new file mode 100644 index 0000000..c8cfa56 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl @@ -0,0 +1,186 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kube-state-metrics.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate basic labels +*/}} +{{- define "kube-state-metrics.labels" }} +helm.sh/chart: {{ template "kube-state-metrics.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }} +{{- include "kube-state-metrics.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Values.customLabels }} +{{ tpl (toYaml .Values.customLabels) . }} +{{- end }} +{{- if .Values.releaseLabel }} +release: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kube-state-metrics.selectorLabels" }} +{{- if .Values.selectorOverride }} +{{ toYaml .Values.selectorOverride }} +{{- else }} +app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for servicemonitor */}} +{{- define "servicemonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . 
}} +{{- end }} +{{- end -}} + +{{/* Sets default scrape limits for scrapeconfig */}} +{{- define "scrapeconfig.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end -}} + +{{/* +Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "kube-state-metrics.imagePullSecrets" -}} +{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }} + {{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml . | trim }} + {{- else }} +- name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +The image to use for kube-state-metrics +*/}} +{{- define "kube-state-metrics.image" -}} +{{- if .Values.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +The image to use for kubeRBACProxy +*/}} +{{- define "kubeRBACProxy.image" -}} +{{- if .Values.kubeRBACProxy.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +The name of the ConfigMap for the customResourceState config. +*/}} +{{- define "kube-state-metrics.crsConfigMapName" -}} + {{- if ne .Values.customResourceState.name "" }} + {{- .Values.customResourceState.name }} + {{- else }} + {{- template "kube-state-metrics.fullname" . 
}}-customresourcestate-config + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml new file mode 100644 index 0000000..025cd47 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "cilium") }} +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +spec: + endpointSelector: + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + egress: + {{- if and .Values.networkPolicy.cilium .Values.networkPolicy.cilium.kubeApiServerSelector }} + {{ toYaml .Values.networkPolicy.cilium.kubeApiServerSelector | nindent 6 }} + {{- else }} + - toEntities: + - kube-apiserver + {{- end }} + ingress: + - toPorts: + - ports: + - port: {{ .Values.service.port | quote }} + protocol: TCP + {{- if .Values.selfMonitor.enabled }} + - port: {{ .Values.selfMonitor.telemetryPort | default 8081 | quote }} + protocol: TCP + {{ end }} +{{ end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..cf9f628 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml new file mode 100644 index 0000000..e64c4a7 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.customResourceState.enabled .Values.customResourceState.create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kube-state-metrics.crsConfigMapName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . 
| indent 4 }} + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} +data: + {{ .Values.customResourceState.key }}: | + {{- toYaml .Values.customResourceState.config | nindent 4 }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml new file mode 100644 index 0000000..9425092 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,379 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + replicas: {{ .Values.replicas }} + {{- if not .Values.autosharding.enabled }} + strategy: + type: {{ .Values.updateStrategy | default "RollingUpdate" }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] + {{- end }} + template: + metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: + {{ toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- with .Values.initContainers }} + initContainers: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: {{ toYaml .Values.dnsConfig | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.dnsPolicy }} + containers: + {{- $servicePort := ternary .Values.kubeRBACProxy.port (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}} + {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}} + - name: {{ template "kube-state-metrics.name" . 
}} + {{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.env }} + {{- toYaml .Values.env | nindent 8 }} + {{- end }} + {{ else }} + {{- if .Values.env }} + env: + {{- toYaml .Values.env | nindent 8 }} + {{- end }} + {{- end }} + args: + {{- if .Values.extraArgs }} + {{- .Values.extraArgs | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - --host=127.0.0.1 + {{- end }} + - --port={{ $servicePort }} + {{- if .Values.collectors }} + - --resources={{ .Values.collectors | join "," }} + {{- end }} + {{- if .Values.metricLabelsAllowlist }} + - --metric-labels-allowlist={{ .Values.metricLabelsAllowlist | join "," }} + {{- end }} + {{- if .Values.metricAnnotationsAllowList }} + - --metric-annotations-allowlist={{ .Values.metricAnnotationsAllowList | join "," }} + {{- end }} + {{- if .Values.metricAllowlist }} + - --metric-allowlist={{ .Values.metricAllowlist | join "," }} + {{- end }} + {{- if .Values.metricDenylist }} + - --metric-denylist={{ .Values.metricDenylist | join "," }} + {{- end }} + {{- $namespaces := list }} + {{- if .Values.namespaces }} + {{- range $ns := join "," .Values.namespaces | split "," }} + {{- $namespaces = append $namespaces (tpl $ns $) }} + {{- end }} + {{- end }} + {{- if .Values.releaseNamespace }} + {{- $namespaces = append $namespaces ( include "kube-state-metrics.namespace" . ) }} + {{- end }} + {{- if $namespaces }} + - --namespaces={{ $namespaces | mustUniq | join "," }} + {{- end }} + {{- if .Values.namespacesDenylist }} + - --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }} + {{- end }} + {{- if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.kubeconfig.enabled }} + - --kubeconfig=/opt/k8s/.kube/config + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - --telemetry-host=127.0.0.1 + - --telemetry-port={{ $telemetryPort }} + {{- else }} + {{- if .Values.selfMonitor.telemetryHost }} + - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} + {{- end }} + {{- if .Values.selfMonitor.telemetryPort }} + - --telemetry-port={{ $telemetryPort }} + {{- end }} + {{- end }} + {{- if .Values.customResourceState.enabled }} + - --custom-resource-state-config-file=/etc/customresourcestate/{{ .Values.customResourceState.key }} + {{- end }} + {{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumeMounts) }} + volumeMounts: + {{- if .Values.kubeconfig.enabled }} + - name: kubeconfig + mountPath: /opt/k8s/.kube/ + readOnly: true + {{- end }} + {{- if .Values.customResourceState.enabled }} + - name: customresourcestate-config + mountPath: /etc/customresourcestate + readOnly: true + {{- end }} + {{- if .Values.volumeMounts }} +{{ toYaml .Values.volumeMounts | indent 8 }} + {{- end }} + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ include "kube-state-metrics.image" . 
}} + {{- if eq .Values.kubeRBACProxy.enabled false }} + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: http + {{- if .Values.selfMonitor.enabled }} + - containerPort: {{ $telemetryPort }} + name: metrics + {{- end }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + httpGet: + {{- if .Values.hostNetwork }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.startupProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: /healthz + {{- if .Values.kubeRBACProxy.enabled }} + port: http + scheme: HTTPS + {{- else }} + port: {{ $servicePort }} + scheme: {{ upper .Values.startupProbe.httpGet.scheme }} + {{- end }} + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + httpGet: + {{- if .Values.hostNetwork }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.livenessProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: /livez + {{- if .Values.kubeRBACProxy.enabled }} + port: http + scheme: HTTPS + {{- else }} + port: {{ $servicePort }} + scheme: {{ upper .Values.livenessProbe.httpGet.scheme }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + httpGet: + {{- if .Values.hostNetwork }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.readinessProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: /readyz + {{- if .Values.kubeRBACProxy.enabled }} + port: metrics + scheme: HTTPS + {{- else }} + port: {{ $telemetryPort }} + scheme: {{ upper .Values.readinessProbe.httpGet.scheme }} + {{- end }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- if .Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 10 }} +{{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy-http + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.kubeRBACProxy.ignoreProbePaths }} + - --ignore-paths=/livez,/readyz + {{- end }} + - --secure-listen-address=:{{ .Values.service.port | default 8080}} + - --upstream=http://127.0.0.1:{{ $servicePort }}/ + - --proxy-endpoints-port={{ .Values.kubeRBACProxy.proxyEndpointsPort | default 8888 }} + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + {{- with .Values.kubeRBACProxy.volumeMounts }} + {{- toYaml . 
| nindent 10 }} + {{- end }} + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + image: {{ include "kubeRBACProxy.image" . }} + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: http + - containerPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort | default 8888 }} + name: http-healthz + readinessProbe: + httpGet: + scheme: HTTPS + port: http-healthz + path: /healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: +{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }} +{{- end }} +{{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: +{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }} +{{- end }} + {{- if .Values.selfMonitor.enabled }} + - name: kube-rbac-proxy-telemetry + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.kubeRBACProxy.ignoreProbePaths }} + - --ignore-paths=/livez,/readyz + {{- end }} + - --secure-listen-address=:{{ .Values.selfMonitor.telemetryPort | default 8081 }} + - --upstream=http://127.0.0.1:{{ $telemetryPort }}/ + - --proxy-endpoints-port=8889 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + {{- with .Values.kubeRBACProxy.volumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + image: {{ include "kubeRBACProxy.image" . }} + ports: + - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + name: metrics + - containerPort: 8889 + name: metrics-healthz + readinessProbe: + httpGet: + scheme: HTTPS + port: 8889 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: +{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }} +{{- end }} +{{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: +{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }} +{{- end }} + {{- end }} + {{- end }} + {{- with .Values.containers }} + {{- toYaml . | nindent 6 }} + {{- end }} +{{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: + {{- if kindIs "map" .Values.affinity }} + {{- toYaml .Values.affinity | nindent 8 }} + {{- else }} + {{- tpl .Values.affinity $ | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumes) (.Values.kubeRBACProxy.enabled) }} + volumes: + {{- if .Values.kubeconfig.enabled}} + - name: kubeconfig + secret: + secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + {{- end }} + {{- if .Values.kubeRBACProxy.enabled}} + - name: kube-rbac-proxy-config + configMap: + name: {{ template "kube-state-metrics.fullname" . 
}}-rbac-config + {{- end }} + {{- if .Values.customResourceState.enabled}} + - name: customresourcestate-config + configMap: + name: {{ template "kube-state-metrics.crsConfigMapName" . }} + {{- end }} + {{- if .Values.volumes }} +{{ toYaml .Values.volumes | indent 8 }} + {{- end }} + {{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml new file mode 100644 index 0000000..567f7bf --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml new file mode 100644 index 0000000..6af0084 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.kubeconfig.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +type: Opaque +data: + config: '{{ .Values.kubeconfig.secret }}' +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml new file mode 100644 index 0000000..abe292a --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "kubernetes") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +spec: + {{- if .Values.networkPolicy.egress }} + ## Deny all egress by default + egress: + {{- toYaml .Values.networkPolicy.egress | nindent 4 }} + {{- end }} + ingress: + {{- if .Values.networkPolicy.ingress }} + {{- toYaml .Values.networkPolicy.ingress | nindent 4 }} + {{- else }} + ## Allow ingress on default ports by default + - ports: + - port: http + protocol: TCP + {{- if .Values.selfMonitor.enabled }} + {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}} + - port: {{ $telemetryPort }} + protocol: TCP + {{- end }} + {{- end }} + podSelector: + {{- if .Values.networkPolicy.podSelector }} + {{- toYaml .Values.networkPolicy.podSelector | nindent 4 }} + {{- else }} + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . 
| indent 6 }} + {{- end }} + policyTypes: + - Ingress + - Egress +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml new file mode 100644 index 0000000..2d1e64b --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml new file mode 100644 index 0000000..671dc9d --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.kubeRBACProxy.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-rbac-config + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} +data: + config-file.yaml: |+ + authorization: + resourceAttributes: + namespace: {{ template "kube-state-metrics.namespace" . }} + apiVersion: v1 + resource: services + subresource: {{ template "kube-state-metrics.fullname" . }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/role.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/role.yaml new file mode 100644 index 0000000..4b6537b --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/role.yaml @@ -0,0 +1,236 @@ +{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- range (ternary (join "," .Values.namespaces | split "," ) (list "") (eq $.Values.rbac.useClusterRole false)) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if eq $.Values.rbac.useClusterRole false }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- if eq $.Values.rbac.useClusterRole false }} + namespace: {{ . 
}} +{{- end }} +rules: +{{ if has "certificatesigningrequests" $.Values.collectors }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if has "configmaps" $.Values.collectors }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if has "cronjobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "daemonsets" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "deployments" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if has "endpoints" $.Values.collectors }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if has "endpointslices" $.Values.collectors }} +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: ["list", "watch"] +{{ end -}} +{{ if has "horizontalpodautoscalers" $.Values.collectors }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "ingresses" $.Values.collectors }} +- apiGroups: ["networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "jobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "leases" $.Values.collectors }} +- apiGroups: ["coordination.k8s.io"] + resources: + - leases + verbs: ["list", "watch"] +{{ end -}} +{{ if has "limitranges" $.Values.collectors }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if has "mutatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "namespaces" $.Values.collectors }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if has "networkpolicies" $.Values.collectors }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if has "ingressclasses" $.Values.collectors }} +- apiGroups: ["networking.k8s.io"] + resources: + - ingressclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "clusterrolebindings" $.Values.collectors }} +- apiGroups: ["rbac.authorization.k8s.io"] + resources: + - clusterrolebindings + verbs: ["list", "watch"] +{{ end -}} +{{ if has "clusterroles" $.Values.collectors }} +- apiGroups: ["rbac.authorization.k8s.io"] + resources: + - clusterroles + verbs: ["list", "watch"] +{{ end -}} +{{ if has "roles" $.Values.collectors }} +- apiGroups: ["rbac.authorization.k8s.io"] + resources: + - roles + verbs: ["list", "watch"] +{{ end -}} +{{ if has "nodes" $.Values.collectors }} +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumeclaims" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumes" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "poddisruptionbudgets" $.Values.collectors }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "pods" $.Values.collectors }} +- apiGroups: [""] + 
resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicasets" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicationcontrollers" $.Values.collectors }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "resourcequotas" $.Values.collectors }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if has "secrets" $.Values.collectors }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "services" $.Values.collectors }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if has "statefulsets" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "storageclasses" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "validatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "volumeattachments" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{- if $.Values.kubeRBACProxy.enabled }} +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] +{{- end }} +{{- if $.Values.customResourceState.enabled }} +- apiGroups: ["apiextensions.k8s.io"] + resources: + - customresourcedefinitions + verbs: ["list", "watch"] +{{- end }} +{{ if $.Values.rbac.extraRules }} +{{ toYaml $.Values.rbac.extraRules }} +{{ end }} +{{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml new file mode 100644 index 0000000..330651b --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} +{{- range (join "," $.Values.namespaces) | split "," }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.rbac.useExistingRole) }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- else }} + name: {{ $.Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" $ }} + namespace: {{ template "kube-state-metrics.namespace" $ }} +{{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/scrapeconfig.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/scrapeconfig.yaml new file mode 100644 index 0000000..028f3d1 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/scrapeconfig.yaml @@ -0,0 +1,60 @@ +{{- if .Values.prometheus.scrapeconfig.enabled }} +apiVersion: monitoring.coreos.com/v1alpha1 +kind: ScrapeConfig +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- with .Values.prometheus.scrapeconfig.additionalLabels }} + {{- tpl (toYaml . | nindent 4) $ }} + {{- end }} + {{- with .Values.prometheus.scrapeconfig.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $ }} + {{- end }} +spec: + {{- include "scrapeconfig.scrapeLimits" .Values.prometheus.scrapeconfig | indent 2 }} + staticConfigs: + - targets: + - {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc:{{ .Values.service.port }} + {{- if .Values.prometheus.scrapeconfig.staticConfigLabels}} + labels: + {{- with .Values.prometheus.scrapeconfig.staticConfigLabels }} + {{- tpl (toYaml . | nindent 8) $ }} + {{- end }} + {{- end }} +{{- if .Values.prometheus.scrapeconfig.jobName }} + jobName: {{ .Values.prometheus.scrapeconfig.jobName }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.honorLabels }} + honorLabels: true +{{- end }} +{{- if .Values.prometheus.scrapeconfig.scrapeInterval }} + scrapeInterval: {{ .Values.prometheus.scrapeconfig.scrapeInterval }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.scrapeconfig.scrapeTimeout }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.proxyUrl }} + proxyUrl: {{ .Values.prometheus.scrapeconfig.proxyUrl }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.enableHttp2 }} + enableHttp2: {{ .Values.prometheus.scrapeconfig.enableHttp2 }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.scrapeconfig.metricRelabelings | nindent 4 }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.scrapeconfig.relabelings | nindent 4 }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.scheme }} + scheme: {{ .Values.prometheus.scrapeconfig.scheme }} +{{- end }} +{{- if .Values.prometheus.scrapeconfig.tlsConfig }} + tlsConfig: + {{- toYaml (.Values.prometheus.scrapeconfig.tlsConfig ) | nindent 4 }} +{{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/service.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/service.yaml new file mode 100644 index 0000000..4bfa7df --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . 
}} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + {{- if .Values.service.ipDualStack.enabled }} + ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }} + ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }} + {{- end }} + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port | default 8080}} + {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: http + {{ if .Values.selfMonitor.enabled }} + - name: metrics + protocol: TCP + port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + targetPort: metrics + {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} + nodePort: {{ .Values.selfMonitor.telemetryNodePort }} + {{- end }} + {{ end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} +{{- if .Values.autosharding.enabled }} + clusterIP: None +{{- else if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} + selector: + {{- include "kube-state-metrics.selectorLabels" . | indent 4 }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100644 index 0000000..c302bc7 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} +{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} +{{- end }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100644 index 0000000..99d7fa9 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,120 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . 
| indent 4 }} + {{- with .Values.prometheus.monitor.additionalLabels }} + {{- tpl (toYaml . | nindent 4) $ }} + {{- end }} + {{- with .Values.prometheus.monitor.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $ }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + {{- with .Values.prometheus.monitor.targetLabels }} + targetLabels: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.monitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | indent 2 }} + {{- if .Values.prometheus.monitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- with .Values.prometheus.monitor.namespaceSelector }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + selector: + matchLabels: + {{- with .Values.prometheus.monitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + {{- end }} + endpoints: + - port: http + {{- if or .Values.prometheus.monitor.http.interval .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.http.interval | default .Values.prometheus.monitor.interval }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.scrapeTimeout .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.http.scrapeTimeout | default .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.proxyUrl .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.http.proxyUrl | default .Values.prometheus.monitor.proxyUrl }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.enableHttp2 .Values.prometheus.monitor.enableHttp2 }} + enableHttp2: {{ .Values.prometheus.monitor.http.enableHttp2 | default .Values.prometheus.monitor.enableHttp2 }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.honorLabels .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if or .Values.prometheus.monitor.http.metricRelabelings .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml (.Values.prometheus.monitor.http.metricRelabelings | default .Values.prometheus.monitor.metricRelabelings) | nindent 8 }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.relabelings .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml (.Values.prometheus.monitor.http.relabelings | default .Values.prometheus.monitor.relabelings) | nindent 8 }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.scheme .Values.prometheus.monitor.scheme }} + scheme: {{ .Values.prometheus.monitor.http.scheme | default .Values.prometheus.monitor.scheme }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.tlsConfig .Values.prometheus.monitor.tlsConfig }} + tlsConfig: + {{- toYaml (.Values.prometheus.monitor.http.tlsConfig | default .Values.prometheus.monitor.tlsConfig) | nindent 8 }} + {{- end }} + {{- if or .Values.prometheus.monitor.http.bearerTokenFile .Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ .Values.prometheus.monitor.http.bearerTokenFile | default .Values.prometheus.monitor.bearerTokenFile }} + {{- end }} + {{- with (.Values.prometheus.monitor.http.bearerTokenSecret | default .Values.prometheus.monitor.bearerTokenSecret) }} + bearerTokenSecret: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.selfMonitor.enabled }} + - port: metrics + {{- if or .Values.prometheus.monitor.metrics.interval .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.metrics.interval | default .Values.prometheus.monitor.interval }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.scrapeTimeout .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.metrics.scrapeTimeout | default .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.proxyUrl .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.metrics.proxyUrl | default .Values.prometheus.monitor.proxyUrl }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.enableHttp2 .Values.prometheus.monitor.enableHttp2 }} + enableHttp2: {{ .Values.prometheus.monitor.metrics.enableHttp2 | default .Values.prometheus.monitor.enableHttp2 }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.honorLabels .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.metricRelabelings .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml (.Values.prometheus.monitor.metrics.metricRelabelings | default .Values.prometheus.monitor.metricRelabelings) | nindent 8 }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.relabelings .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml (.Values.prometheus.monitor.metrics.relabelings | default .Values.prometheus.monitor.relabelings) | nindent 8 }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.scheme .Values.prometheus.monitor.scheme }} + scheme: {{ .Values.prometheus.monitor.metrics.scheme | default .Values.prometheus.monitor.scheme }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.tlsConfig .Values.prometheus.monitor.tlsConfig }} + tlsConfig: + {{- toYaml (.Values.prometheus.monitor.metrics.tlsConfig | default .Values.prometheus.monitor.tlsConfig) | nindent 8 }} + {{- end }} + {{- if or .Values.prometheus.monitor.metrics.bearerTokenFile .Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ .Values.prometheus.monitor.metrics.bearerTokenFile | default .Values.prometheus.monitor.bearerTokenFile }} + {{- end }} + {{- with (.Values.prometheus.monitor.metrics.bearerTokenSecret | default .Values.prometheus.monitor.bearerTokenSecret) }} + bearerTokenSecret: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100644 index 0000000..489de14 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} + resources: + - statefulsets + verbs: + - get + - list + - watch +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100644 index 0000000..73b37a4 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml new file mode 100644 index 0000000..f46305b --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml @@ -0,0 +1,44 @@ +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + {{- with .Values.verticalPodAutoscaler.recommenders }} + recommenders: + {{- toYaml . | nindent 4 }} + {{- end }} + resourcePolicy: + containerPolicies: + - containerName: {{ template "kube-state-metrics.name" . }} + {{- with .Values.verticalPodAutoscaler.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.controlledValues }} + controlledValues: {{ .Values.verticalPodAutoscaler.controlledValues }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.maxAllowed }} + maxAllowed: + {{ toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.minAllowed }} + minAllowed: + {{ toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }} + {{- end }} + targetRef: + apiVersion: apps/v1 + {{- if .Values.autosharding.enabled }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ template "kube-state-metrics.fullname" . }} + {{- with .Values.verticalPodAutoscaler.updatePolicy }} + updatePolicy: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/kube-state-metrics/values.yaml b/opencloud/charts/prometheus/charts/kube-state-metrics/values.yaml new file mode 100644 index 0000000..188d206 --- /dev/null +++ b/opencloud/charts/prometheus/charts/kube-state-metrics/values.yaml @@ -0,0 +1,611 @@ +# Default values for kube-state-metrics. 
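+# Example (illustrative only, not a chart default): the verticalpodautoscaler.yaml
+# template added just above only renders when the cluster serves the
+# autoscaling.k8s.io/v1 API and verticalPodAutoscaler.enabled is true; with
+# autosharding enabled, its targetRef points at the StatefulSet instead of the
+# Deployment. A values override exercising that path could look like:
+#
+# autosharding:
+#   enabled: true
+# verticalPodAutoscaler:
+#   enabled: true
+#   controlledResources: ["cpu", "memory"]
+#   maxAllowed:
+#     memory: 256Mi
+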
+prometheusScrape: true
+image:
+  registry: registry.k8s.io
+  repository: kube-state-metrics/kube-state-metrics
+  # If unset, defaults to "v" + .Chart.AppVersion
+  tag: ""
+  sha: ""
+  pullPolicy: IfNotPresent
+
+imagePullSecrets: []
+# - name: "image-pull-secret"
+
+global:
+  # To help compatibility with other charts which use global.imagePullSecrets.
+  # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style).
+  # global:
+  #   imagePullSecrets:
+  #   - name: pullSecret1
+  #   - name: pullSecret2
+  # or
+  # global:
+  #   imagePullSecrets:
+  #   - pullSecret1
+  #   - pullSecret2
+  imagePullSecrets: []
+  #
+  # Allow parent charts to override registry hostname
+  imageRegistry: ""
+
+# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data
+# will be automatically sharded across <.Values.replicas> pods using the built-in
+# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding
+# This is an experimental feature and there are no stability guarantees.
+autosharding:
+  enabled: false
+
+replicas: 1
+
+# Change the deployment strategy when autosharding is disabled.
+# ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+# The default is "RollingUpdate" as per Kubernetes defaults.
+# During a release, 'RollingUpdate' can lead to two running instances for a short period of time while 'Recreate' can create a small gap in data.
+# updateStrategy: Recreate
+
+# Number of old revisions to retain to allow rollback
+# Default Kubernetes value is set to 10
+revisionHistoryLimit: 10
+
+# List of additional CLI arguments to configure kube-state-metrics,
+# for example: --enable-gzip-encoding, --log-file, etc.
+# All possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/main/docs/developer/cli-arguments.md
+extraArgs: []
+
+# If false, the user opts out of automounting API credentials.
+automountServiceAccountToken: true
+
+service:
+  port: 8080
+  # Default to ClusterIP for backward compatibility
+  type: ClusterIP
+  ipDualStack:
+    enabled: false
+    ipFamilies: ["IPv6", "IPv4"]
+    ipFamilyPolicy: "PreferDualStack"
+  nodePort: 0
+  loadBalancerIP: ""
+  # Only allow access to the loadBalancerIP from these IPs
+  loadBalancerSourceRanges: []
+  clusterIP: ""
+  annotations: {}
+
+## Additional labels to add to all resources
+customLabels: {}
+  # app: kube-state-metrics
+
+## Override selector labels
+selectorOverride: {}
+
+## Set to true to add the release label so that scraping the servicemonitor with kube-prometheus-stack works out of the box
+releaseLabel: false
+
+hostNetwork: false
+
+rbac:
+  # If true, create & use RBAC resources
+  create: true
+
+  # Set to a role name to use an existing role - this skips role creation but still creates the service account and the rolebinding to the role named here.
+  # useExistingRole: your-existing-role
+
+  # If set to false, run without cluster-wide privileges - this ONLY works if namespaces is also set (if useExistingRole is set, that name is used as the ClusterRole or Role to bind to)
+  useClusterRole: true
+
+  # Add permissions for CustomResources' apiGroups in the Role/ClusterRole. Should be used in conjunction with the Custom Resource State Metrics configuration.
+  # Example:
+  # - apiGroups: ["monitoring.coreos.com"]
+  #   resources: ["prometheuses"]
+  #   verbs: ["list", "watch"]
+  extraRules: []
+
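+# Example (illustrative only): running without cluster-wide privileges requires
+# pairing rbac.useClusterRole: false with an explicit namespaces value, which is
+# defined further below in this file:
+#
+# rbac:
+#   useClusterRole: false
+# namespaces: "team-a,team-b"
+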
+# Configure kube-rbac-proxy. When enabled, one kube-rbac-proxy container is created per exposed
+# HTTP endpoint (metrics, and telemetry if enabled).
+# The requests are served through the same Service, but they are then HTTPS.
+kubeRBACProxy:
+  enabled: false
+  image:
+    registry: quay.io
+    repository: brancz/kube-rbac-proxy
+    tag: v0.20.0
+    sha: ""
+    pullPolicy: IfNotPresent
+
+  # This sets --ignore-paths=/livez,/readyz in the kubeRBACProxy container args
+  # so that the pod probes keep working properly with kubeRBACProxy enabled.
+  ignoreProbePaths: true
+
+  # List of additional CLI arguments to configure kube-rbac-proxy,
+  # for example: --tls-cipher-suites, --log-file, etc.
+  # All possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage
+  extraArgs: []
+
+  ## Specify security settings for a Container
+  ## Allows overrides and additional options compared to (Pod) securityContext
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+  containerSecurityContext:
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop:
+      - ALL
+
+  # Configure the specific upstream port for the kube-state-metrics container
+  port: 9090
+  # Configure the specific proxy endpoints port.
+  # This port serves healthz for the readinessProbe of the kube-rbac-proxy-http container
+  proxyEndpointsPort: 8888
+
+  resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 64Mi
+  # requests:
+  #   cpu: 10m
+  #   memory: 32Mi
+
+  ## volumeMounts enables mounting custom volumes in rbac-proxy containers
+  ## Useful for TLS certificates and keys
+  volumeMounts: []
+    # - mountPath: /etc/tls
+    #   name: kube-rbac-proxy-tls
+    #   readOnly: true
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created; requires rbac.create to be true
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name:
+  # Reference to one or more secrets to be used when pulling images
+  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  imagePullSecrets: []
+  # ServiceAccount annotations.
+  # Use case: AWS EKS IAM roles for service accounts
+  # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html
+  annotations: {}
+  # If false, the user opts out of automounting API credentials.
+  automountServiceAccountToken: true
+
+# Additional environment variables
+env: []
+  # - name: GOMAXPROCS
+  #   valueFrom:
+  #     resourceFieldRef:
+  #       resource: limits.cpu
+
+prometheus:
+  monitor:
+    enabled: false
+    annotations: {}
+    additionalLabels: {}
+    namespace: ""
+    namespaceSelector: []
+    jobLabel: ""
+    targetLabels: []
+    podTargetLabels: []
+    ## SampleLimit defines a per-scrape limit on the number of scraped samples that will be accepted.
+    ##
+    sampleLimit: 0
+
+    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
+    ##
+    targetLimit: 0
+
+    ## Per-scrape limit on the number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+    ##
+    labelLimit: 0
+
+    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+    ##
+    labelNameLengthLimit: 0
+
+    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+    ##
+    labelValueLengthLimit: 0
+    selectorOverride: {}
+
+    ## kube-state-metrics endpoint
+    http:
+      interval: ""
+      scrapeTimeout: ""
+      proxyUrl: ""
+      ## Whether to enable HTTP2 for the servicemonitor
+      enableHttp2: false
+      honorLabels: false
+      metricRelabelings: []
+      relabelings: []
+      scheme: ""
+      ## File to read the bearer token for scraping targets from
+      bearerTokenFile: ""
+      ## Secret to mount to read the bearer token for scraping targets. The secret needs
+      ## to be in the same namespace as the service monitor and accessible by the
+      ## Prometheus Operator
+      bearerTokenSecret: {}
+        # name: secret-name
+        # key: key-name
+      tlsConfig: {}
+
+    ## selfMonitor endpoint
+    metrics:
+      interval: ""
+      scrapeTimeout: ""
+      proxyUrl: ""
+      ## Whether to enable HTTP2 for the servicemonitor
+      enableHttp2: false
+      honorLabels: false
+      metricRelabelings: []
+      relabelings: []
+      scheme: ""
+      ## File to read the bearer token for scraping targets from
+      bearerTokenFile: ""
+      ## Secret to mount to read the bearer token for scraping targets. The secret needs
+      ## to be in the same namespace as the service monitor and accessible by the
+      ## Prometheus Operator
+      bearerTokenSecret: {}
+        # name: secret-name
+        # key: key-name
+      tlsConfig: {}
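+    # Example (illustrative only): the servicemonitor.yaml template lets the
+    # per-endpoint blocks above ("http" and "metrics") override the shared
+    # settings at this level, so a hypothetical override could be:
+    #
+    # enabled: true
+    # interval: 30s        # shared default for both endpoints
+    # http:
+    #   interval: 15s      # takes precedence for the http endpoint only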
+  ## Create a ScrapeConfig resource for scraping the kube-state-metrics service. Use this instead of the ServiceMonitor
+  ## to scrape multiple instances of kube-state-metrics safely.
+  scrapeconfig:
+    ## To avoid duplicate metrics, first disable the ServiceMonitor creation via prometheus.monitor.enabled=false
+    enabled: false
+    annotations: {}
+    additionalLabels: {}
+    jobName: kube-state-metrics
+    ## SampleLimit defines a per-scrape limit on the number of scraped samples that will be accepted.
+    ##
+    sampleLimit: 0
+
+    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
+    ##
+    targetLimit: 0
+
+    ## Per-scrape limit on the number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+    ##
+    labelLimit: 0
+
+    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+    ##
+    labelNameLengthLimit: 0
+
+    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+    ##
+    labelValueLengthLimit: 0
+
+    ## StaticConfigLabels defines the labels to be used in the Prometheus static configuration for scraping.
+    staticConfigLabels: {}
+    scrapeInterval: ""
+    scrapeTimeout: ""
+    proxyUrl: ""
+    ## Whether to enable HTTP2 for the scrapeconfig
+    enableHttp2: false
+    honorLabels: true
+    metricRelabelings: []
+    relabelings: []
+    scheme: ""
+    tlsConfig: {}
+
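+# Example (illustrative only): switching from the ServiceMonitor to the
+# ScrapeConfig above; the monitor is disabled first so the same targets are not
+# scraped twice:
+#
+# prometheus:
+#   monitor:
+#     enabled: false
+#   scrapeconfig:
+#     enabled: true
+#     scrapeInterval: 30s
+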
+## Configure the network policy for kube-state-metrics
+networkPolicy:
+  enabled: false
+  # networkPolicy.flavor -- Flavor of the network policy to use.
+  # Can be:
+  # * kubernetes for networking.k8s.io/v1/NetworkPolicy
+  # * cilium for cilium.io/v2/CiliumNetworkPolicy
+  flavor: kubernetes
+
+  ## Configure the cilium network policy kube-apiserver selector
+  # cilium:
+  #   kubeApiServerSelector:
+  #     - toEntities:
+  #       - kube-apiserver
+
+  # egress:
+  # - {}
+  # ingress:
+  # - {}
+  # podSelector:
+  #   matchLabels:
+  #     app.kubernetes.io/name: kube-state-metrics
+
+securityContext:
+  enabled: true
+  runAsGroup: 65534
+  runAsUser: 65534
+  fsGroup: 65534
+  runAsNonRoot: true
+  seccompProfile:
+    type: RuntimeDefault
+
+## Specify security settings for a Container
+## Allows overrides and additional options compared to (Pod) securityContext
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+containerSecurityContext:
+  readOnlyRootFilesystem: true
+  allowPrivilegeEscalation: false
+  capabilities:
+    drop:
+    - ALL
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+## Affinity settings for pod assignment
+## Can be defined as either a dict or a string. A string is useful for `tpl` templating.
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+affinity: {}
+# affinity: |
+#   podAntiAffinity:
+#     requiredDuringSchedulingIgnoredDuringExecution:
+#       - labelSelector:
+#           matchLabels:
+#             {{- include "kube-state-metrics.selectorLabels" . | indent 10 }}
+#         topologyKey: kubernetes.io/hostname
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+## Topology spread constraints for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+topologySpreadConstraints: []
+
+# Annotations to be added to the deployment/statefulset
+annotations: {}
+
+# Labels to be added to the deployment/statefulset
+labels: {}
+
+# Annotations to be added to the pod
+podAnnotations: {}
+
+# Labels to be added to the pod
+podLabels: {}
+
+## Assign a PriorityClassName to pods if set
+# priorityClassName: ""
+
+# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+podDisruptionBudget: {}
+
+# Comma-separated list of metrics to be exposed.
+# This list comprises exact metric names and/or regex patterns.
+# The allowlist and denylist are mutually exclusive.
+metricAllowlist: []
+
+# Comma-separated list of metrics not to be enabled.
+# This list comprises exact metric names and/or regex patterns.
+# The allowlist and denylist are mutually exclusive.
+metricDenylist: []
+
+# Comma-separated list of additional Kubernetes label keys that will be used in the resource's
+# labels metric. By default the metric contains only name and namespace labels.
+# To include additional labels, provide a list of resource names in their plural form and the Kubernetes
+# label keys you would like to allow for them (Example: '=namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...)'.
+# A single '*' can be provided per resource instead to allow any labels, but that has
+# severe performance implications (Example: '=pods=[*]').
+metricLabelsAllowlist: []
+  # - namespaces=[k8s-label-1,k8s-label-n]
+
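+# Example (illustrative only): exposing two hypothetical pod label keys, "team"
+# and "env", through the kube_pod_labels metric:
+#
+# metricLabelsAllowlist:
+#   - pods=[team,env]
+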
+# Comma-separated list of Kubernetes annotation keys that will be used in the resource's
+# labels metric. By default the metric contains only name and namespace labels.
+# To include additional annotations, provide a list of resource names in their plural form and the Kubernetes
+# annotation keys you would like to allow for them (Example: '=namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...)'.
+# A single '*' can be provided per resource instead to allow any annotations, but that has
+# severe performance implications (Example: '=pods=[*]').
+metricAnnotationsAllowList: []
+  # - pods=[k8s-annotation-1,k8s-annotation-n]
+
+# Available collectors for kube-state-metrics.
+# By default, all available resources are enabled; comment out to disable.
+collectors:
+  - certificatesigningrequests
+  - configmaps
+  - cronjobs
+  - daemonsets
+  - deployments
+  - endpoints
+  - horizontalpodautoscalers
+  - ingresses
+  - jobs
+  - leases
+  - limitranges
+  - mutatingwebhookconfigurations
+  - namespaces
+  - networkpolicies
+  - nodes
+  - persistentvolumeclaims
+  - persistentvolumes
+  - poddisruptionbudgets
+  - pods
+  - replicasets
+  - replicationcontrollers
+  - resourcequotas
+  - secrets
+  - services
+  - statefulsets
+  - storageclasses
+  - validatingwebhookconfigurations
+  - volumeattachments
+  # - ingressclasses
+  # - clusterrolebindings
+  # - clusterroles
+  # - roles
+
+# Enabling kubeconfig will pass the --kubeconfig argument to the container
+kubeconfig:
+  enabled: false
+  # base64-encoded kube-config file
+  secret:
+
+# Enabling customResourceState support will create a ConfigMap including your config, which will be read by kube-state-metrics
+customResourceState:
+  # Whether to enable support for CustomResourceStateMetrics.
+  enabled: false
+
+  # Whether to create the ConfigMap that holds the config.
+  create: true
+
+  # Name of the ConfigMap that holds the config. If empty, the name will be generated based on the release name.
+  name: ""
+
+  # ConfigMap key that holds the config.
+  key: config.yaml
+
+  # Definition of the CustomResourceStateMetrics. Add (Cluster)Role permissions to list/watch the resources defined in the config to rbac.extraRules.
+  config: {}
+
+# Enable only the release namespace for collecting resources. By default all namespaces are collected.
+# If releaseNamespace and namespaces are both set, a merged list will be collected.
+releaseNamespace: false
+
+# Comma-separated list (string) or YAML list of namespaces to be enabled for collecting resources. By default all namespaces are collected.
+namespaces: ""
+
+# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set,
+# only namespaces that are excluded in namespaces-denylist will be used.
+namespacesDenylist: ""
+
+## Override the deployment namespace
+##
+namespaceOverride: ""
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +# Enable self metrics configuration for service and Service Monitor +# Default values for telemetry configuration can be overridden +# If you set telemetryNodePort, you must also set service.type to NodePort +selfMonitor: + enabled: false + # telemetryHost: 0.0.0.0 + # telemetryPort: 8081 + # telemetryNodePort: 0 + +# Enable vertical pod autoscaler support for kube-state-metrics +verticalPodAutoscaler: + enabled: false + + # Recommender responsible for generating recommendation for the object. + # List should be empty (then the default recommender will generate the recommendation) + # or contain exactly one recommender. + # recommenders: [] + # - name: custom-recommender-performance + + # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + controlledResources: [] + # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. + # controlledValues: RequestsAndLimits + + # Define the max allowed resources for the pod + maxAllowed: {} + # cpu: 200m + # memory: 100Mi + # Define the min allowed resources for the pod + minAllowed: {} + # cpu: 200m + # memory: 100Mi + + # updatePolicy: + # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction + # minReplicas: 1 + # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates + # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". + # updateMode: Auto + +# volumeMounts are used to add custom volume mounts to deployment. +# See example below +volumeMounts: [] +# - mountPath: /etc/config +# name: config-volume + +# volumes are used to add custom volumes to deployment +# See example below +volumes: [] +# - configMap: +# name: cm-for-volume +# name: config-volume + +# Extra manifests to deploy as an array +extraManifests: [] + # - apiVersion: v1 + # kind: ConfigMap + # metadata: + # labels: + # name: prometheus-extra + # data: + # extra-data: "value" + +## Containers allows injecting additional containers. +containers: [] + # - name: crd-init + # image: kiwigrid/k8s-sidecar:latest + +## InitContainers allows injecting additional initContainers. +initContainers: [] + # - name: crd-sidecar + # image: kiwigrid/k8s-sidecar:latest + +## dnsPolicy allows to change the default DNS configuration for the pod +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: ClusterFirst + +## dnsConfig allows setting up specific DNS configuration for the pod +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config +dnsConfig: {} + +## Settings for startup, liveness and readiness probes +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +## + +## Startup probe can optionally be enabled. 
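+## Example (illustrative only): enabling the optional startup probe for slow
+## starts while keeping the liveness/readiness defaults below:
+##
+## startupProbe:
+##   enabled: true
+##   failureThreshold: 30   # tolerate up to 30 * periodSeconds of startup time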
+## +startupProbe: + enabled: false + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + +## Liveness probe +## +livenessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + +## Readiness probe +## +readinessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/.helmignore b/opencloud/charts/prometheus/charts/prometheus-node-exporter/.helmignore new file mode 100644 index 0000000..2846d36 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +ci/ diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/Chart.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/Chart.yaml new file mode 100644 index 0000000..cf54941 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts +apiVersion: v2 +appVersion: 1.10.2 +description: A Helm chart for prometheus node-exporter +home: https://github.com/prometheus/node_exporter/ +keywords: +- node-exporter +- prometheus +- exporter +maintainers: +- email: gianrubio@gmail.com + name: gianrubio + url: https://github.com/gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh + url: https://github.com/zanhsieh +- email: rootsandtrees@posteo.de + name: zeritti + url: https://github.com/zeritti +name: prometheus-node-exporter +sources: +- https://github.com/prometheus/node_exporter/ +type: application +version: 4.49.1 diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/README.md b/opencloud/charts/prometheus/charts/prometheus-node-exporter/README.md new file mode 100644 index 0000000..a540467 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/README.md @@ -0,0 +1,96 @@ +# Prometheus Node Exporter + +Prometheus exporter for hardware and OS metrics exposed by *NIX kernels, written in Go with pluggable metric collectors. + +This chart bootstraps a Prometheus [Node Exporter](http://github.com/prometheus/node_exporter) daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Usage + +The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/). + +- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter` +- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `prometheus-node-exporter` + +The installation instructions use the OCI registry. 
Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
+
+### Install Chart
+
+```console
+helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter
+```
+
+_See [configuration](#configuring) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+### Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+### Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+#### From 3.x to 4.x
+
+Starting with version 4.0.0, the `node exporter` chart uses the [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). You therefore have to delete the daemonset before you upgrade.
+
+```console
+kubectl delete daemonset -l app=prometheus-node-exporter
+helm upgrade -i prometheus-node-exporter prometheus-community/prometheus-node-exporter
+```
+
+If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor), please make sure to update their `selector` fields to match the new labels.
+
+#### From 2.x to 3.x
+
+Change the following:
+
+```yaml
+hostRootFsMount: true
+```
+
+to:
+
+```yaml
+hostRootFsMount:
+  enabled: true
+  mountPropagation: HostToContainer
+```
+
+## Configuring
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run this configuration command:
+
+```console
+helm show values oci://ghcr.io/prometheus-community/charts/prometheus-node-exporter
+```
+
+### kube-rbac-proxy
+
+You can enable `prometheus-node-exporter` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy an RBAC proxy container protecting the node-exporter endpoint.
+To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached, such as:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: prometheus-node-exporter-read
+rules:
+  - apiGroups: [ "" ]
+    resources: ["services/node-exporter-prometheus-node-exporter"]
+    verbs:
+      - get
+```
+
+See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details.
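+
+For completeness, a hypothetical `ClusterRoleBinding` attaching that role to a scraping `ServiceAccount` (a minimal sketch; the subject name and namespace are placeholders, not defined by this chart) could look like:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: prometheus-node-exporter-read
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: prometheus-node-exporter-read   # the ClusterRole shown above
+subjects:
+  - kind: ServiceAccount
+    name: prometheus-scraper            # placeholder scraping identity
+    namespace: monitoring               # placeholder namespace
+```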
diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt
new file mode 100644
index 0000000..db8584d
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt
@@ -0,0 +1,29 @@
+1. Get the application URL by running these commands:
+{{- if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-node-exporter.namespace" . }} {{ template "prometheus-node-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-node-exporter.namespace" . }} -l "app.kubernetes.io/name={{ template "prometheus-node-exporter.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:9100 to use your application"
+  kubectl port-forward --namespace {{ template "prometheus-node-exporter.namespace" . }} $POD_NAME 9100
+{{- end }}
+
+{{- if .Values.kubeRBACProxy.enabled }}
+
+kube-rbac-proxy endpoint protection is enabled:
+- The metrics endpoint is now served over HTTPS
+- Ensure that the client authenticates its requests (e.g. via a service account) with the following role permissions:
+```
+rules:
+  - apiGroups: [ "" ]
+    resources: ["services/{{ template "prometheus-node-exporter.fullname" . }}"]
+    verbs:
+      - get
+```
+{{- end }}
\ No newline at end of file
diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl
new file mode 100644
index 0000000..890c487
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl
@@ -0,0 +1,237 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus-node-exporter.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If the release name contains the chart name, it will be used as the full name.
+*/}}
+{{- define "prometheus-node-exporter.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus-node-exporter.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "prometheus-node-exporter.labels" -}}
+helm.sh/chart: {{ include "prometheus-node-exporter.chart" .
}} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ include "prometheus-node-exporter.name" . }} +{{ include "prometheus-node-exporter.selectorLabels" . }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end }} +{{- with .Values.commonLabels }} +{{ tpl (toYaml .) $ }} +{{- end }} +{{- if .Values.releaseLabel }} +release: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "prometheus-node-exporter.selectorLabels" -}} +app.kubernetes.io/name: {{ include "prometheus-node-exporter.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "prometheus-node-exporter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "prometheus-node-exporter.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +The image to use +*/}} +{{- define "prometheus-node-exporter.image" -}} +{{- if .Values.image.sha }} +{{- fail "image.sha forbidden. Use image.digest instead" }} +{{- else if .Values.image.digest }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.digest }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.digest }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "prometheus-node-exporter.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Create the namespace name of the service monitor +*/}} +{{- define "prometheus-node-exporter.monitor-namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- if .Values.prometheus.monitor.namespace }} +{{- .Values.prometheus.monitor.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for servicemonitor */}} +{{- define "servicemonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "prometheus-node-exporter.imagePullSecrets" -}} +{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }} + {{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml . | trim }} + {{- else }} +- name: {{ . 
}} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Create the namespace name of the pod monitor +*/}} +{{- define "prometheus-node-exporter.podmonitor-namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- if .Values.prometheus.podMonitor.namespace }} +{{- .Values.prometheus.podMonitor.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for podmonitor */}} +{{- define "podmonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end }} + +{{/* Sets sidecar volumeMounts */}} +{{- define "prometheus-node-exporter.sidecarVolumeMounts" -}} +{{- range $_, $mount := $.Values.sidecarVolumeMount }} +- name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} +{{- end }} +{{- range $_, $mount := $.Values.sidecarHostVolumeMounts }} +- name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} +{{- if $mount.mountPropagation }} + mountPropagation: {{ $mount.mountPropagation }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +The default node affinity to exclude +- AWS Fargate +- Azure virtual nodes +*/}} +{{- define "prometheus-node-exporter.defaultAffinity" -}} +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + - key: type + operator: NotIn + values: + - virtual-kubelet +{{- end -}} +{{- define "prometheus-node-exporter.mergedAffinities" -}} +{{- $defaultAffinity := include "prometheus-node-exporter.defaultAffinity" . | fromYaml -}} +{{- with .Values.affinity -}} + {{- if .nodeAffinity -}} + {{- $_ := set $defaultAffinity "nodeAffinity" (mergeOverwrite $defaultAffinity.nodeAffinity .nodeAffinity) -}} + {{- end -}} + {{- if .podAffinity -}} + {{- $_ := set $defaultAffinity "podAffinity" .podAffinity -}} + {{- end -}} + {{- if .podAntiAffinity -}} + {{- $_ := set $defaultAffinity "podAntiAffinity" .podAntiAffinity -}} + {{- end -}} +{{- end -}} +{{- toYaml $defaultAffinity -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml new file mode 100644 index 0000000..c256dba --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + labels: + {{- include "prometheus-node-exporter.labels" . 
| nindent 4 }} +rules: + {{- if $.Values.kubeRBACProxy.enabled }} + - apiGroups: [ "authentication.k8s.io" ] + resources: + - tokenreviews + verbs: [ "create" ] + - apiGroups: [ "authorization.k8s.io" ] + resources: + - subjectaccessreviews + verbs: [ "create" ] + {{- end }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..653305a --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + name: {{ template "prometheus-node-exporter.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "prometheus-node-exporter.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "prometheus-node-exporter.serviceAccountName" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml new file mode 100644 index 0000000..3d8293d --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -0,0 +1,349 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.daemonsetAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + spec: + automountServiceAccountToken: {{ ternary true false (or .Values.serviceAccount.automountServiceAccountToken .Values.kubeRBACProxy.enabled) }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.extraInitContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "prometheus-node-exporter.serviceAccountName" . }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . 
}} + {{- end }} + containers: + {{- $servicePort := ternary .Values.kubeRBACProxy.port .Values.service.port .Values.kubeRBACProxy.enabled }} + {{- $servicePortReference := ternary .Values.kubeRBACProxy.port .Values.service.portName .Values.kubeRBACProxy.enabled }} + - name: node-exporter + image: {{ include "prometheus-node-exporter.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.hostRootFsMount.enabled }} + - --path.rootfs=/host/root + {{- if semverCompare ">=1.4.0-0" (coalesce .Values.version .Values.image.tag .Chart.AppVersion) }} + - --path.udev.data=/host/root/run/udev/data + {{- end }} + {{- end }} + - --web.listen-address=[$(HOST_IP)]:{{ $servicePort }} + {{- with .Values.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + env: + - name: HOST_IP + {{- if .Values.kubeRBACProxy.enabled }} + value: 127.0.0.1 + {{- else if .Values.service.listenOnAllInterfaces }} + value: 0.0.0.0 + {{- else }} + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{- end }} + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if eq .Values.kubeRBACProxy.enabled false }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + httpGet: + {{- if .Values.kubeRBACProxy.enabled }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.livenessProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: / + port: {{ $servicePortReference }} + scheme: {{ upper .Values.livenessProbe.httpGet.scheme }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + httpGet: + {{- if .Values.kubeRBACProxy.enabled }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.readinessProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: / + port: {{ $servicePortReference }} + scheme: {{ upper .Values.readinessProbe.httpGet.scheme }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.terminationMessageParams.enabled }} + {{- with .Values.terminationMessageParams }} + terminationMessagePath: {{ .terminationMessagePath }} + terminationMessagePolicy: {{ .terminationMessagePolicy }} + {{- end }} + {{- end }} + volumeMounts: + - name: proc + mountPath: /host/proc + {{- with .Values.hostProcFsMount.mountPropagation }} + mountPropagation: {{ . }} + {{- end }} + readOnly: true + - name: sys + mountPath: /host/sys + {{- with .Values.hostSysFsMount.mountPropagation }} + mountPropagation: {{ . 
}} + {{- end }} + readOnly: true + {{- if .Values.hostRootFsMount.enabled }} + - name: root + mountPath: /host/root + {{- with .Values.hostRootFsMount.mountPropagation }} + mountPropagation: {{ . }} + {{- end }} + readOnly: true + {{- end }} + {{- range $_, $mount := .Values.extraHostVolumeMounts }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} + {{- with $mount.mountPropagation }} + mountPropagation: {{ . }} + {{- end }} + {{- end }} + {{- range $_, $mount := .Values.sidecarVolumeMount }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: true + {{- end }} + {{- range $_, $mount := .Values.configmaps }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + {{- end }} + {{- range $_, $mount := .Values.secrets }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- range .Values.sidecars }} + {{- $overwrites := dict "volumeMounts" (concat (include "prometheus-node-exporter.sidecarVolumeMounts" $ | fromYamlArray) (.volumeMounts | default list) | default list) }} + {{- $defaults := dict "image" (include "prometheus-node-exporter.image" $) "securityContext" $.Values.containerSecurityContext "imagePullPolicy" $.Values.image.pullPolicy }} + - {{- toYaml (merge $overwrites . $defaults) | nindent 10 }} + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 12 }} + {{- end }} + - --secure-listen-address=:{{ .Values.service.port}} + - --upstream=http://127.0.0.1:{{ $servicePort }}/ + - --proxy-endpoints-port={{ .Values.kubeRBACProxy.proxyEndpointsPort }} + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + {{- if and .Values.kubeRBACProxy.tls.enabled .Values.tlsSecret.enabled }} + - --tls-cert-file=/tls/private/{{ .Values.tlsSecret.certItem }} + - --tls-private-key-file=/tls/private/{{ .Values.tlsSecret.keyItem }} + {{- if and .Values.kubeRBACProxy.tls.tlsClientAuth .Values.tlsSecret.caItem }} + - --client-ca-file=/tls/private/{{ .Values.tlsSecret.caItem }} + {{- end }} + {{- end }} + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + {{- if and .Values.kubeRBACProxy.tls.enabled .Values.tlsSecret.enabled }} + - name: {{ tpl .Values.tlsSecret.volumeName . | quote }} + mountPath: /tls/private + readOnly: true + {{- end }} + {{- with .Values.kubeRBACProxy.extraVolumeMounts }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + {{- if .Values.kubeRBACProxy.image.sha }} + image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}@sha256:{{ .Values.kubeRBACProxy.image.sha }}" + {{- else }} + image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}" + {{- end }} + ports: + - containerPort: {{ .Values.service.port}} + name: {{ .Values.kubeRBACProxy.portName }} + {{- if .Values.kubeRBACProxy.enableHostPort }} + hostPort: {{ .Values.service.port }} + {{- end }} + - containerPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort }} + {{- if .Values.kubeRBACProxy.enableProxyEndpointsHostPort }} + hostPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort }} + {{- end }} + name: "http-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: {{ .Values.kubeRBACProxy.proxyEndpointsPort }} + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: + {{- toYaml .Values.kubeRBACProxy.resources | nindent 12 }} + {{- end }} + {{- if .Values.terminationMessageParams.enabled }} + {{- with .Values.terminationMessageParams }} + terminationMessagePath: {{ .terminationMessagePath }} + terminationMessagePolicy: {{ .terminationMessagePolicy }} + {{- end }} + {{- end }} + {{- with .Values.kubeRBACProxy.env }} + env: + {{- range $key, $value := $.Values.kubeRBACProxy.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: + {{ toYaml .Values.kubeRBACProxy.containerSecurityContext | nindent 12 }} + {{- end }} + {{- end }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostPID: {{ .Values.hostPID }} + hostIPC: {{ .Values.hostIPC }} + affinity: + {{- include "prometheus-node-exporter.mergedAffinities" . | nindent 8 }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.restartPolicy }} + restartPolicy: {{ . }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- if .Values.hostRootFsMount.enabled }} + - name: root + hostPath: + path: / + {{- end }} + {{- range $_, $mount := .Values.extraHostVolumeMounts }} + - name: {{ $mount.name }} + hostPath: + path: {{ $mount.hostPath }} + {{- with $mount.type }} + type: {{ . 
}} + {{- end }} + {{- end }} + {{- range $_, $mount := .Values.sidecarVolumeMount }} + - name: {{ $mount.name }} + emptyDir: + medium: Memory + {{- end }} + {{- range $_, $mount := .Values.sidecarHostVolumeMounts }} + - name: {{ $mount.name }} + hostPath: + path: {{ $mount.hostPath }} + {{- end }} + {{- range $_, $mount := .Values.configmaps }} + - name: {{ $mount.name }} + configMap: + name: {{ $mount.name }} + {{- end }} + {{- range $_, $mount := .Values.secrets }} + - name: {{ $mount.name }} + secret: + secretName: {{ $mount.name }} + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy-config + configMap: + name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config + {{- end }} + {{- if .Values.tlsSecret.enabled }} + - name: {{ tpl .Values.tlsSecret.volumeName . | quote }} + secret: + secretName: {{ tpl .Values.tlsSecret.secretName . | quote }} + items: + - key: {{ required "Value tlsSecret.certItem must be set." .Values.tlsSecret.certItem | quote }} + path: {{ .Values.tlsSecret.certItem | quote }} + - key: {{ required "Value tlsSecret.keyItem must be set." .Values.tlsSecret.keyItem | quote }} + path: {{ .Values.tlsSecret.keyItem | quote }} + {{- if .Values.tlsSecret.caItem }} + - key: {{ .Values.tlsSecret.caItem | quote }} + path: {{ .Values.tlsSecret.caItem | quote }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml new file mode 100644 index 0000000..45eeb8d --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml @@ -0,0 +1,18 @@ +{{- if .Values.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +subsets: + - addresses: + {{- range .Values.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: 9100 + protocol: TCP +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml new file mode 100644 index 0000000..2b21b71 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl . $ }} +{{ end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml new file mode 100644 index 0000000..ee40902 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + ingress: + {{- if .Values.networkPolicy.ingress }} + {{- toYaml .Values.networkPolicy.ingress | nindent 4 }} + {{- else }} + - ports: + - port: {{ .Values.service.port }} + {{- end }} + policyTypes: + - Egress + - Ingress + podSelector: + matchLabels: + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml new file mode 100644 index 0000000..f88da6a --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml @@ -0,0 +1,91 @@ +{{- if .Values.prometheus.podMonitor.enabled }} +apiVersion: {{ .Values.prometheus.podMonitor.apiVersion | default "monitoring.coreos.com/v1" }} +kind: PodMonitor +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.podmonitor-namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.prometheus.podMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.podMonitor.jobLabel }} + {{- include "podmonitor.scrapeLimits" .Values.prometheus.podMonitor | nindent 2 }} + selector: + matchLabels: + {{- with .Values.prometheus.podMonitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "prometheus-node-exporter.namespace" . }} + {{- with .Values.prometheus.podMonitor.attachMetadata }} + attachMetadata: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + podMetricsEndpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.prometheus.podMonitor.scheme }} + scheme: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.path }} + path: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.bearerTokenSecret }} + bearerTokenSecret: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.authorization }} + authorization: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.oauth2 }} + oauth2: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.proxyUrl }} + proxyUrl: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.honorTimestamps }} + honorTimestamps: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.honorLabels }} + honorLabels: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . 
| nindent 8 }} + {{- end }} + enableHttp2: {{ default false .Values.prometheus.podMonitor.enableHttp2 }} + filterRunning: {{ default true .Values.prometheus.podMonitor.filterRunning }} + followRedirects: {{ default false .Values.prometheus.podMonitor.followRedirects }} + {{- with .Values.prometheus.podMonitor.params }} + params: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml new file mode 100644 index 0000000..814e110 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.kubeRBACProxy.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config + namespace: {{ include "prometheus-node-exporter.namespace" . }} +data: + config-file.yaml: |+ + authorization: + resourceAttributes: + namespace: {{ template "prometheus-node-exporter.namespace" . }} + apiVersion: v1 + resource: services + subresource: {{ template "prometheus-node-exporter.fullname" . }} + name: {{ template "prometheus-node-exporter.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml new file mode 100644 index 0000000..9807c66 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.service.ipDualStack.enabled }} + ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }} + ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }} +{{- end }} +{{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.service.internalTrafficPolicy }} + internalTrafficPolicy: {{ .Values.service.internalTrafficPolicy }} +{{- end }} + type: {{ .Values.service.type }} +{{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} + ports: + - port: {{ .Values.service.servicePort | default .Values.service.port }} + {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: {{ .Values.service.portName }} + selector: + {{- include "prometheus-node-exporter.selectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml new file mode 100644 index 0000000..462b0cd --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.rbac.create .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "prometheus-node-exporter.serviceAccountName" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} +{{- end }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml new file mode 100644 index 0000000..96ec1af --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml @@ -0,0 +1,65 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: {{ .Values.prometheus.monitor.apiVersion | default "monitoring.coreos.com/v1" }} +kind: ServiceMonitor +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.monitor-namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.prometheus.monitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | nindent 2 }} + {{- with .Values.prometheus.monitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.monitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- with .Values.prometheus.monitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + {{- end }} + {{- with .Values.prometheus.monitor.attachMetadata }} + attachMetadata: + {{- toYaml . | nindent 4 }} + {{- end }} + endpoints: + - port: {{ .Values.service.portName }} + scheme: {{ .Values.prometheus.monitor.scheme }} + {{- with .Values.prometheus.monitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml new file mode 100644 index 0000000..2c2705f --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml @@ -0,0 +1,40 @@ +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +spec: + {{- with .Values.verticalPodAutoscaler.recommenders }} + recommenders: + {{- toYaml . | nindent 4 }} + {{- end }} + resourcePolicy: + containerPolicies: + - containerName: node-exporter + {{- with .Values.verticalPodAutoscaler.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.verticalPodAutoscaler.controlledValues }} + controlledValues: {{ . }} + {{- end }} + {{- with .Values.verticalPodAutoscaler.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.verticalPodAutoscaler.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: apps/v1 + kind: DaemonSet + name: {{ include "prometheus-node-exporter.fullname" . }} + {{- with .Values.verticalPodAutoscaler.updatePolicy }} + updatePolicy: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-node-exporter/values.yaml b/opencloud/charts/prometheus/charts/prometheus-node-exporter/values.yaml new file mode 100644 index 0000000..c056625 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-node-exporter/values.yaml @@ -0,0 +1,616 @@ +# Default values for prometheus-node-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + registry: quay.io + repository: prometheus/node-exporter + # Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }} + tag: "" + pullPolicy: IfNotPresent + digest: "" + +imagePullSecrets: [] +# - name: "image-pull-secret" +nameOverride: "" +fullnameOverride: "" + +# Number of old history to retain to allow rollback +# Default Kubernetes value is set to 10 +revisionHistoryLimit: 10 + +global: + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + # + # Allow parent charts to override registry hostname + imageRegistry: "" + +# Configure kube-rbac-proxy. When enabled, creates a kube-rbac-proxy to protect the node-exporter http endpoint. +# The requests are served through the same service but requests are HTTPS. 
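+#
+# A minimal sketch of turning the proxy on through a values override (an
+# assumed override file, not a chart default; the key names are the ones
+# defined below):
+#
+# kubeRBACProxy:
+#   enabled: true
+#   tls:
+#     enabled: true
+# tlsSecret:
+#   enabled: true
+#   secretName: prometheus-node-exporter-tls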
+kubeRBACProxy: + enabled: false + ## Set environment variables as name/value pairs + env: {} + # VARIABLE: value + image: + registry: quay.io + repository: brancz/kube-rbac-proxy + tag: v0.20.0 + sha: "" + pullPolicy: IfNotPresent + + # List of additional cli arguments to configure kube-rbac-proxy + # for example: --tls-cipher-suites, --log-file, etc. + # all the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage + extraArgs: [] + + ## Specify security settings for a Container + ## Allows overrides and additional options compared to (Pod) securityContext + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + containerSecurityContext: {} + + # Specify the port used for the Node exporter container (upstream port) + port: 8100 + # Specify the name of the container port + portName: http + # Configure a hostPort. If true, hostPort will be enabled in the container and set to service.port. + enableHostPort: false + + # Configure Proxy Endpoints Port + # This is the port being probed for readiness + proxyEndpointsPort: 8888 + # Configure a hostPort. If true, hostPort will be enabled in the container and set to proxyEndpointsPort. + enableProxyEndpointsHostPort: false + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Additional volume mounts in the kube-rbac-proxy container + ## See extraVolumes below + extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /extra + # readOnly: true + + ## tls enables using TLS resources from a volume on secret referred to in tlsSecret below. + ## When enabling tlsClientAuth, client CA certificate must be set in tlsSecret.caItem. + ## Ref. https://github.com/brancz/kube-rbac-proxy/issues/187 + tls: + enabled: false + tlsClientAuth: false + +## tlsSecret refers to an existing secret holding TLS items: client CA certificate, private key and certificate. +## secretName and volumeName can be templated. +## If enabled, volume volumeName gets created on secret secretName. +## The volume's resources will be used by kube-rbac-proxy if kubeRBACProxy.tls.enabled is set. +tlsSecret: + enabled: false + ## Key with client CA certificate (optional) + caItem: "" + ## Key with certificate + certItem: tls.crt + ## Key with private key + keyItem: tls.key + ## Name of an existing secret + secretName: prometheus-node-exporter-tls + ## Name of the volume to be created + volumeName: prometheus-node-exporter-tls + +## Service configuration +service: + ## Creating a service is enabled by default + enabled: true + + ## Service type + type: ClusterIP + ## IP address for type ClusterIP + clusterIP: "" + ## Default service port. Sets the port of the exposed container as well (NE or kubeRBACProxy). + ## Use "servicePort" below if changing the service port only is desired. + port: 9100 + ## Service port. Use this field if you wish to set a different service port + ## without changing the container port ("port" above). + servicePort: "" + ## Targeted port in the pod. Must refer to an open container port ("port" or "portName"). 
+ ## (IntOrString) + targetPort: 9100 + ## Name of the service port. Sets the port name of the main container (NE) as well. + portName: metrics + ## Port number for service type NodePort + nodePort: null + + ## If true, node exporter will listen on all interfaces + listenOnAllInterfaces: true + + ## Additional annotations and labels for the service + annotations: + prometheus.io/scrape: "true" + labels: {} + + ## Dual stack settings for the service + ## https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## External/Internal traffic policy setting (Cluster, Local) + ## https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-policies + externalTrafficPolicy: "" + internalTrafficPolicy: "" + +# Set a NetworkPolicy with: +# ingress only on service.port or custom policy +# no egress permitted +networkPolicy: + enabled: false + + # ingress: + # - {} + +# Additional environment variables that will be passed to the daemonset +env: {} +## env: +## VARIABLE: value + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + + jobLabel: "" + + # List of pod labels to add to node exporter metrics + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor + podTargetLabels: [] + + # List of target labels to add to node exporter metrics + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor + targetLabels: [] + + scheme: http + basicAuth: {} + bearerTokenFile: + tlsConfig: {} + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Override serviceMonitor selector + ## + selectorOverride: {} + + ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. + ## + attachMetadata: + node: false + + relabelings: [] + metricRelabelings: [] + interval: "" + scrapeTimeout: 10s + ## prometheus.monitor.apiVersion ApiVersion for the serviceMonitor Resource(defaults to "monitoring.coreos.com/v1") + apiVersion: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + # PodMonitor defines monitoring for a set of pods. + # ref. https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor + # Using a PodMonitor may be preferred in some environments where there is very large number + # of Node Exporter endpoints (1000+) behind a single service. + # The PodMonitor is disabled by default. When switching from ServiceMonitor to PodMonitor, + # the time series resulting from the configuration through PodMonitor may have different labels. + # For instance, there will not be the service label any longer which might + # affect PromQL queries selecting that label. 
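+  # As a minimal sketch (an assumed values override, not a chart default),
+  # switching to the PodMonitor usually goes together with disabling the
+  # ServiceMonitor above, so targets are not scraped twice:
+  #
+  # prometheus:
+  #   monitor:
+  #     enabled: false
+  #   podMonitor:
+  #     enabled: true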
+ podMonitor: + enabled: false + # Namespace in which to deploy the pod monitor. Defaults to the release namespace. + namespace: "" + # Additional labels, e.g. setting a label for pod monitor selector as set in prometheus + additionalLabels: {} + # release: kube-prometheus-stack + # PodTargetLabels transfers labels of the Kubernetes Pod onto the target. + podTargetLabels: [] + # apiVersion defaults to monitoring.coreos.com/v1. + apiVersion: "" + # Override pod selector to select pod objects. + selectorOverride: {} + # Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. + attachMetadata: + node: false + # The label to use to retrieve the job name from. Defaults to label app.kubernetes.io/name. + jobLabel: "" + + # Scheme/protocol to use for scraping. + scheme: "http" + # Path to scrape metrics at. + path: "/metrics" + + # BasicAuth allow an endpoint to authenticate over basic authentication. + # More info: https://prometheus.io/docs/operating/configuration/#endpoint + basicAuth: {} + # Secret to mount to read bearer token for scraping targets. + # The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator. + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core + bearerTokenSecret: {} + # TLS configuration to use when scraping the endpoint. + tlsConfig: {} + # Authorization section for this endpoint. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#safeauthorization + authorization: {} + # OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#oauth2 + oauth2: {} + + # ProxyURL eg http://proxyserver:2195. Directs scrapes through proxy to this endpoint. + proxyUrl: "" + # Interval at which endpoints should be scraped. If not specified Prometheus' global scrape interval is used. + interval: "" + # Timeout after which the scrape is ended. If not specified, the Prometheus global scrape interval is used. + scrapeTimeout: "" + # HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + honorTimestamps: true + # HonorLabels chooses the metric's labels on collisions with target labels. + honorLabels: true + # Whether to enable HTTP2. Default false. + enableHttp2: "" + # Drop pods that are not running. (Failed, Succeeded). + # Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + filterRunning: "" + # FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. Default false. + followRedirects: "" + # Optional HTTP URL parameters + params: {} + + # RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds + # relabelings for a few standard Kubernetes fields. The original scrape job's name + # is available via the __tmp_prometheus_job_name label. + # More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + relabelings: [] + # MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + # SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + sampleLimit: 0 + # TargetLimit defines a limit on the number of scraped targets that will be accepted. + targetLimit: 0 + # Per-scrape limit on number of labels that will be accepted for a sample. 
+    # Only valid in Prometheus versions 2.27.0 and newer.
+    labelLimit: 0
+    # Per-scrape limit on length of labels name that will be accepted for a sample.
+    # Only valid in Prometheus versions 2.27.0 and newer.
+    labelNameLengthLimit: 0
+    # Per-scrape limit on length of labels value that will be accepted for a sample.
+    # Only valid in Prometheus versions 2.27.0 and newer.
+    labelValueLengthLimit: 0
+
+## Customize the updateStrategy if set
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate:
+    maxUnavailable: 1
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 200m
+  #   memory: 50Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 30Mi
+
+# Specify the container restart policy passed to the Node Exporter container
+# Possible Values: Always (default)|OnFailure|Never
+restartPolicy: null
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name:
+  annotations: {}
+  imagePullSecrets: []
+  automountServiceAccountToken: false
+
+securityContext:
+  fsGroup: 65534
+  runAsGroup: 65534
+  runAsNonRoot: true
+  runAsUser: 65534
+
+containerSecurityContext:
+  readOnlyRootFilesystem: true
+  # capabilities:
+  #   add:
+  #     - SYS_TIME
+
+rbac:
+  ## If true, create & use RBAC resources
+  ##
+  create: true
+
+# for deployments that have node_exporter deployed outside of the cluster, list
+# their addresses here
+endpoints: []
+
+# Expose the service to the host network
+hostNetwork: true
+
+# Share the host process ID namespace
+hostPID: true
+
+# Share the host ipc namespace
+hostIPC: false
+
+# Mount the node's root file system (/) at /host/root in the container
+hostRootFsMount:
+  enabled: true
+  # Defines how new mounts in existing mounts on the node or in the container
+  # are propagated to the container or node, respectively. Possible values are
+  # None, HostToContainer, and Bidirectional. If this field is omitted, then
+  # None is used. More information on:
+  # https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
+  mountPropagation: HostToContainer
+
+# Mount the node's proc file system (/proc) at /host/proc in the container
+hostProcFsMount:
+  # Possible values are None, HostToContainer, and Bidirectional
+  mountPropagation: ""
+
+# Mount the node's sys file system (/sys) at /host/sys in the container
+hostSysFsMount:
+  # Possible values are None, HostToContainer, and Bidirectional
+  mountPropagation: ""
+
+## Assign a group of affinity scheduling rules
+## The default nodeAffinity excludes Fargate nodes and virtual kubelets from scheduling
+## unless overridden by hard node affinity set in the field.
+affinity: {} +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchFields: +# - key: metadata.name +# operator: In +# values: +# - target-host-name + +# Annotations to be added to node exporter pods +podAnnotations: + # Fix for very slow GKE cluster upgrades + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + +# Extra labels to add to node exporter pods (can be templated) +podLabels: {} + +## Extra labels to attach to all resources (can be templated) +commonLabels: {} + +# Annotations to be added to node exporter daemonset +daemonsetAnnotations: {} + +## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box +releaseLabel: false + +# Custom DNS configuration to be added to prometheus-node-exporter pods +dnsConfig: {} +# nameservers: +# - 1.2.3.4 +# searches: +# - ns1.svc.cluster-domain.example +# - my.dns.search.suffix +# options: +# - name: ndots +# value: "2" +# - name: edns0 + +## Assign a nodeSelector if operating a hybrid cluster +## +nodeSelector: + kubernetes.io/os: linux + # kubernetes.io/arch: amd64 + +# Specify grace period for graceful termination of pods. Defaults to 30 if null or not specified +terminationGracePeriodSeconds: null + +tolerations: + - effect: NoSchedule + operator: Exists + +# Enable or disable container termination message settings +# https://kubernetes.io/docs/tasks/debug/debug-application/determine-reason-pod-failure/ +terminationMessageParams: + enabled: false + # If enabled, specify the path for termination messages + terminationMessagePath: /dev/termination-log + # If enabled, specify the policy for termination messages + terminationMessagePolicy: File + + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +## Additional container arguments +## +extraArgs: [] +# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$ +# - --collector.textfile.directory=/run/prometheus + +## Additional mounts from the host to node-exporter container +## +extraHostVolumeMounts: [] +# - name: +# hostPath: +# https://kubernetes.io/docs/concepts/storage/volumes/#hostpath-volume-types +# type: "" (Default)|DirectoryOrCreate|Directory|FileOrCreate|File|Socket|CharDevice|BlockDevice +# mountPath: +# readOnly: true|false +# mountPropagation: None|HostToContainer|Bidirectional + +## Additional configmaps to be mounted. 
+## +configmaps: [] +# - name: +# mountPath: + +secrets: [] +# - name: +# mountPath: + +## Override the deployment namespace +## +namespaceOverride: "" + +## Additional containers for export metrics to text file; fields image,imagePullPolicy,securityContext take default value from main container +## +sidecars: [] +# - name: nvidia-dcgm-exporter +# image: nvidia/dcgm-exporter:1.4.3 +# volumeMounts: +# - name: tmp +# mountPath: /tmp + +## Volume for sidecar containers +## +sidecarVolumeMount: [] +# - name: collector-textfiles +# mountPath: /run/prometheus +# readOnly: false + +## Additional mounts from the host to sidecar containers +## +sidecarHostVolumeMounts: [] +# - name: +# hostPath: +# mountPath: +# readOnly: true|false +# mountPropagation: None|HostToContainer|Bidirectional + +## Additional InitContainers to initialize the pod +## +extraInitContainers: [] + +## Liveness probe +## +livenessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +## Readiness probe +## +readinessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +# Enable vertical pod autoscaler support for prometheus-node-exporter +verticalPodAutoscaler: + enabled: false + + # Recommender responsible for generating recommendation for the object. + # List should be empty (then the default recommender will generate the recommendation) + # or contain exactly one recommender. + # recommenders: + # - name: custom-recommender-performance + + # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + controlledResources: [] + # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. + # controlledValues: RequestsAndLimits + + # Define the max allowed resources for the pod + maxAllowed: {} + # cpu: 200m + # memory: 100Mi + # Define the min allowed resources for the pod + minAllowed: {} + # cpu: 200m + # memory: 100Mi + + # updatePolicy: + # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction + # minReplicas: 1 + # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates + # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". + # updateMode: Auto + +# Extra manifests to deploy as an array +extraManifests: [] + # - | + # apiVersion: v1 + # kind: ConfigMap + # metadata: + # name: prometheus-extra + # data: + # extra-data: "value" + +## Extra volumes to become available in the pod +extraVolumes: [] + # - name: extra-volume + # secret: + # defaultMode: 420 + # optional: false + # secretName: node-exporter-secret + +## Extra volume mounts in the node-exporter container +extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /extra + # readOnly: true + +# Override version of app, required if image.tag is defined and does not follow semver +version: "" diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/.helmignore b/opencloud/charts/prometheus/charts/prometheus-pushgateway/.helmignore new file mode 100644 index 0000000..4bcfe5f --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). 
Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# OWNERS file for Kubernetes
+OWNERS
+
+ci/
diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/Chart.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/Chart.yaml
new file mode 100644
index 0000000..7b608f3
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/Chart.yaml
@@ -0,0 +1,27 @@
+annotations:
+  artifacthub.io/license: Apache-2.0
+  artifacthub.io/links: |
+    - name: Chart Source
+      url: https://github.com/prometheus-community/helm-charts
+apiVersion: v2
+appVersion: v1.11.2
+description: A Helm chart for the Prometheus Pushgateway
+home: https://github.com/prometheus/pushgateway
+keywords:
+- pushgateway
+- prometheus
+maintainers:
+- email: gianrubio@gmail.com
+  name: gianrubio
+  url: https://github.com/gianrubio
+- email: christian.staude@staffbase.com
+  name: cstaud
+  url: https://github.com/cstaud
+- email: rootsandtrees@posteo.de
+  name: zeritti
+  url: https://github.com/zeritti
+name: prometheus-pushgateway
+sources:
+- https://github.com/prometheus/pushgateway
+type: application
+version: 3.4.2
diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/README.md b/opencloud/charts/prometheus/charts/prometheus-pushgateway/README.md
new file mode 100644
index 0000000..8cf8fda
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/README.md
@@ -0,0 +1,101 @@
+# Prometheus Pushgateway
+
+This chart bootstraps a Prometheus [Pushgateway](http://github.com/prometheus/pushgateway) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+An optional Prometheus `ServiceMonitor` can be enabled, should you wish to use this gateway with [Prometheus Operator](https://github.com/coreos/prometheus-operator).
+
+## Usage
+
+The chart is distributed as an [OCI Artifact](https://helm.sh/docs/topics/registries/) as well as via a traditional [Helm Repository](https://helm.sh/docs/topics/chart_repository/).
+
+- OCI Artifact: `oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway`
+- Helm Repository: `https://prometheus-community.github.io/helm-charts` with chart `prometheus-pushgateway`
+
+The installation instructions use the OCI registry. Refer to the [`helm repo`](https://helm.sh/docs/helm/helm_repo/) command documentation for information on installing charts via the traditional repository.
+
+### Install Chart
+
+```console
+helm install [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+### Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+### Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+#### To 3.0.0
+
+Previously, as dynamic labels were also set on the statefulset's volume claim template, it was not possible
+to upgrade a chart release in the usual manner while using a statefulset with persistence, because the volume claim template's fields are immutable.
+
+This release removes the dynamic labels from the statefulset's volume claim template.
+If you have configured a statefulset with persistent storage,
+please delete the statefulset before upgrading:
+
+```console
+kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway --cascade=orphan
+```
+
+#### To 2.0.0
+
+The chart API version has been upgraded to v2, so Helm 3 is required from now on.
+
+The Docker image tag now defaults to the `appVersion` field of Chart.yaml.
+
+Version 2.0.0 also adopted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). Specifically, the label mapping is listed below:
+
+```console
+OLD => NEW
+----------------------------------------
+heritage => app.kubernetes.io/managed-by
+chart => helm.sh/chart
+[container version] => app.kubernetes.io/version
+app => app.kubernetes.io/name
+release => app.kubernetes.io/instance
+```
+
+Therefore, depending on how you have configured the chart, the previous StatefulSet or Deployment needs to be deleted before the upgrade.
+
+If `runAsStatefulSet: false` (this is the default):
+
+```console
+kubectl delete deploy -l app=prometheus-pushgateway
+```
+
+If `runAsStatefulSet: true`:
+
+```console
+kubectl delete sts -l app=prometheus-pushgateway
+```
+
+After that, do the actual upgrade:
+
+```console
+helm upgrade -i prometheus-pushgateway prometheus-community/prometheus-pushgateway
+```
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run this configuration command:
+
+```console
+helm show values oci://ghcr.io/prometheus-community/charts/prometheus-pushgateway
+```
diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt
new file mode 100644
index 0000000..263b1d8
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-pushgateway.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-pushgateway.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-pushgateway.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of it by running 'kubectl get svc -w {{ template "prometheus-pushgateway.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-pushgateway.namespace" . }} {{ template "prometheus-pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-pushgateway.namespace" . }} -l "app.kubernetes.io/name={{ template "prometheus-pushgateway.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl port-forward $POD_NAME 9091
+  echo "Visit http://127.0.0.1:9091 to use your application"
+{{- end }}
diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl
new file mode 100644
index 0000000..ba379b2
--- /dev/null
+++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl
@@ -0,0 +1,297 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus-pushgateway.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Namespace to set on the resources
+*/}}
+{{- define "prometheus-pushgateway.namespace" -}}
+  {{- if .Values.namespaceOverride -}}
+    {{- .Values.namespaceOverride -}}
+  {{- else -}}
+    {{- .Release.Namespace -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "prometheus-pushgateway.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus-pushgateway.chart" -}}
+{{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "prometheus-pushgateway.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "prometheus-pushgateway.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create default labels
+*/}}
+{{- define "prometheus-pushgateway.defaultLabels" -}}
+helm.sh/chart: {{ include "prometheus-pushgateway.chart" . }}
+{{ include "prometheus-pushgateway.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Values.podLabels }}
+{{ toYaml . }}
+{{- end }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "prometheus-pushgateway.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "prometheus-pushgateway.name" .
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Define webConfiguration +*/}} +{{- define "prometheus-pushgateway.webConfiguration" -}} +basic_auth_users: +{{- range $k, $v := .Values.webConfiguration.basicAuthUsers }} + {{ $k }}: {{ htpasswd "" $v | trimPrefix ":"}} +{{- end }} +{{- end }} + +{{/* +Define Authorization +*/}} +{{- define "prometheus-pushgateway.Authorization" -}} +{{- $users := keys .Values.webConfiguration.basicAuthUsers }} +{{- $user := first $users }} +{{- $password := index .Values.webConfiguration.basicAuthUsers $user }} +{{- $user }}:{{ $password }} +{{- end }} + +{{/* +Define basicAuth +*/}} +{{- define "prometheus-pushgateway.basicAuth" -}} +{{- $users := keys .Values.webConfiguration.basicAuthUsers }} +{{- $user := first $users }} +{{- $password := index .Values.webConfiguration.basicAuthUsers $user -}} +user: {{ $user | b64enc | quote }} +password: {{ $password | b64enc | quote }} +{{- end }} + +{{/* +Set the image with or without the registry +*/}} +{{- define "prometheus-pushgateway.image" -}} +{{- $registry := default .Values.image.registry (.Values.global).imageRegistry }} +{{- $repository := .Values.image.repository }} +{{- $tag := default .Chart.AppVersion .Values.image.tag }} +{{- if $registry }} + {{- printf "%s/%s:%s" $registry $repository $tag -}} +{{- else -}} + {{- printf "%s:%s" $repository $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Returns pod spec +*/}} +{{- define "prometheus-pushgateway.podSpec" -}} +serviceAccountName: {{ include "prometheus-pushgateway.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . | quote }} +{{- end }} +{{- with .Values.hostAliases }} +hostAliases: +{{- toYaml . | nindent 2 }} +{{- end }} +{{- with (.Values.global).imagePullSecrets | default .Values.imagePullSecrets }} +imagePullSecrets: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.extraInitContainers }} +initContainers: + {{- toYaml . | nindent 2 }} +{{- end }} +containers: + {{- with .Values.extraContainers }} + {{- toYaml . | nindent 2 }} + {{- end }} + - name: pushgateway + image: {{ include "prometheus-pushgateway.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.extraVars }} + env: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- if or .Values.extraArgs .Values.webConfiguration }} + args: + {{- with .Values.extraArgs }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- if .Values.webConfiguration }} + - --web.config.file=/etc/config/web-config.yaml + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9091 + protocol: TCP + {{- if .Values.liveness.enabled }} + {{- $livenessCommon := omit .Values.liveness.probe "httpGet" }} + livenessProbe: + {{- with .Values.liveness.probe }} + httpGet: + path: {{ .httpGet.path }} + port: {{ .httpGet.port }} + {{- if or .httpGet.httpHeaders $.Values.webConfiguration.basicAuthUsers }} + httpHeaders: + {{- if $.Values.webConfiguration.basicAuthUsers }} + - name: Authorization + value: Basic {{ include "prometheus-pushgateway.Authorization" $ | b64enc }} + {{- end }} + {{- with .httpGet.httpHeaders }} + {{- toYaml . 
| nindent 10 }} + {{- end }} + {{- end }} + {{- toYaml $livenessCommon | nindent 6 }} + {{- end }} + {{- end }} + {{- if .Values.readiness.enabled }} + {{- $readinessCommon := omit .Values.readiness.probe "httpGet" }} + readinessProbe: + {{- with .Values.readiness.probe }} + httpGet: + path: {{ .httpGet.path }} + port: {{ .httpGet.port }} + {{- if or .httpGet.httpHeaders $.Values.webConfiguration.basicAuthUsers }} + httpHeaders: + {{- if $.Values.webConfiguration.basicAuthUsers }} + - name: Authorization + value: Basic {{ include "prometheus-pushgateway.Authorization" $ | b64enc }} + {{- end }} + {{- with .httpGet.httpHeaders }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + {{- toYaml $readinessCommon | nindent 6 }} + {{- end }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.lifecycle }} + lifecycle: {{ toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + subPath: "{{ .Values.persistentVolume.subPath }}" + {{- if .Values.webConfiguration }} + - name: web-config + mountPath: "/etc/config" + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- if or .Values.podAntiAffinity .Values.affinity }} +affinity: +{{- end }} + {{- with .Values.affinity }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- if eq .Values.podAntiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ include "prometheus-pushgateway.name" . }}]} + {{- else if eq .Values.podAntiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ include "prometheus-pushgateway.name" . }}]} + {{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + {{- $storageVolumeAsPVCTemplate := and .Values.runAsStatefulSet .Values.persistentVolume.enabled -}} + {{- if not $storageVolumeAsPVCTemplate }} + - name: storage-volume + {{- if .Values.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistentVolume.existingClaim }}{{ .Values.persistentVolume.existingClaim }}{{- else }}{{ include "prometheus-pushgateway.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.webConfiguration }} + - name: web-config + secret: + secretName: {{ include "prometheus-pushgateway.fullname" . }} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 2 }} + {{- else if $storageVolumeAsPVCTemplate }} + {{- if .Values.webConfiguration }} + - name: web-config + secret: + secretName: {{ include "prometheus-pushgateway.fullname" . 
}} + {{- else }} + [] + {{- end }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml new file mode 100644 index 0000000..a655380 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml @@ -0,0 +1,32 @@ +{{- if not .Values.runAsStatefulSet }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.deploymentAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + replicas: {{ .Values.replicaCount }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 8 }} + spec: + {{- include "prometheus-pushgateway.podSpec" . | nindent 6 }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/extra-manifests.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/extra-manifests.yaml new file mode 100644 index 0000000..bafee95 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/extra-manifests.yaml @@ -0,0 +1,8 @@ +{{- range .Values.extraManifests }} +--- + {{- if typeIs "string" . }} + {{- tpl . $ }} + {{- else }} + {{- tpl (. | toYaml | nindent 0) $ }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml new file mode 100644 index 0000000..c449406 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled }} +{{- $serviceName := include "prometheus-pushgateway.fullname" . }} +{{- $servicePort := .Values.service.port }} +{{- $ingressPath := .Values.ingress.path }} +{{- $ingressClassName := .Values.ingress.className }} +{{- $ingressPathType := .Values.ingress.pathType }} +{{- $extraPaths := .Values.ingress.extraPaths }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + ingressClassName: {{ $ingressClassName }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + pathType: {{ $ingressPathType }} + backend: + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml new file mode 100644 index 0000000..529b255 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml @@ -0,0 +1,26 @@ +{{- if .Values.networkPolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- if .Values.networkPolicy.customSelectors }} + name: ingress-allow-customselector-{{ template "prometheus-pushgateway.name" . }} + {{- else if .Values.networkPolicy.allowAll }} + name: ingress-allow-all-{{ template "prometheus-pushgateway.name" . }} + {{- else -}} + {{- fail "One of `allowAll` or `customSelectors` must be specified." }} + {{- end }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + podSelector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- with .Values.networkPolicy.customSelectors }} + from: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml new file mode 100644 index 0000000..1ee77e1 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml new file mode 100644 index 0000000..d2a85f4 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml @@ -0,0 +1,29 @@ +{{- if and (not .Values.runAsStatefulSet) .Values.persistentVolume.enabled (not .Values.persistentVolume.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- with .Values.persistentVolume.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.persistentVolumeLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . 
}} +spec: + accessModes: + {{- toYaml .Values.persistentVolume.accessModes | nindent 4 }} + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/secret.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/secret.yaml new file mode 100644 index 0000000..a6d6c04 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/secret.yaml @@ -0,0 +1,24 @@ +{{- if .Values.webConfiguration }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ include "prometheus-pushgateway.namespace" . }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} +data: + web-config.yaml: {{ include "prometheus-pushgateway.webConfiguration" . | b64enc}} +{{- end }} +--- +{{- if and .Values.webConfiguration .Values.serviceMonitor.enabled (empty .Values.serviceMonitor.basicAuth) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "prometheus-pushgateway.fullname" . }}-basic-auth + namespace: {{ default (include "prometheus-pushgateway.namespace" .) .Values.serviceMonitor.namespace }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} +data: + {{- include "prometheus-pushgateway.basicAuth" . | nindent 2 }} +type: Opaque +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml new file mode 100644 index 0000000..15029f7 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml @@ -0,0 +1,45 @@ +{{- $stsNoHeadlessSvcTypes := list "LoadBalancer" "NodePort" -}} +apiVersion: v1 +kind: Service +metadata: + {{- with .Values.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{ else if and .Values.runAsStatefulSet (not (has .Values.service.type $stsNoHeadlessSvcTypes)) }} + clusterIP: None # Headless service + {{- end }} + {{- if .Values.service.ipDualStack.enabled }} + ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }} + ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }} + {{- end }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . 
}} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + {{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + protocol: TCP + name: {{ .Values.service.portName }} + selector: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 4 }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml new file mode 100644 index 0000000..88f1470 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.serviceAccountLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.serviceAccountName" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml new file mode 100644 index 0000000..78a2081 --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml @@ -0,0 +1,58 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- if .Values.serviceMonitor.additionalLabels }} + {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ default (include "prometheus-pushgateway.namespace" .) .Values.serviceMonitor.namespace }} +spec: + endpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scheme }} + scheme: {{ . }} + {{- end }} + {{- if and .Values.webConfiguration (empty .Values.serviceMonitor.basicAuth) }} + basicAuth: + password: + name: {{ include "prometheus-pushgateway.fullname" . }}-basic-auth + key: password + username: + name: {{ include "prometheus-pushgateway.fullname" . }}-basic-auth + key: user + {{- else if not (empty .Values.serviceMonitor.basicAuth) }} + basicAuth: {{ toYaml .Values.serviceMonitor.basicAuth | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.bearerTokenFile }} + bearerTokenFile: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml .| nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + path: {{ .Values.serviceMonitor.telemetryPath }} + honorLabels: {{ .Values.serviceMonitor.honorLabels }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- tpl (toYaml . 
| nindent 6) $ }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "prometheus-pushgateway.namespace" . }} + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} +{{- end -}} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml new file mode 100644 index 0000000..6fa39ea --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml @@ -0,0 +1,51 @@ +{{- if .Values.runAsStatefulSet }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "prometheus-pushgateway.fullname" . }} + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 8 }} + spec: + {{- include "prometheus-pushgateway.podSpec" . | nindent 6 }} + {{- if .Values.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + {{- with .Values.persistentVolume.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.persistentVolumeLabels }} + labels: + {{- toYaml . | nindent 10 }} + {{- end }} + name: storage-volume + spec: + accessModes: + {{ toYaml .Values.persistentVolume.accessModes }} + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/charts/prometheus-pushgateway/values.yaml b/opencloud/charts/prometheus/charts/prometheus-pushgateway/values.yaml new file mode 100644 index 0000000..93d780f --- /dev/null +++ b/opencloud/charts/prometheus/charts/prometheus-pushgateway/values.yaml @@ -0,0 +1,393 @@ +# Default values for prometheus-pushgateway. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+
+global:
+  imageRegistry: ""
+  imagePullSecrets: []
+
+# Provide a name in place of prometheus-pushgateway for `app:` labels
+nameOverride: ""
+
+# Provide a name to substitute for the full names of resources
+fullnameOverride: ""
+
+# Provide a namespace to substitute for the namespace on resources
+namespaceOverride: ""
+
+image:
+  registry: ""
+  repository: quay.io/prometheus/pushgateway
+  # if not set, the appVersion field from Chart.yaml is used
+  tag: ""
+  pullPolicy: IfNotPresent
+
+# Optional pod imagePullSecrets
+imagePullSecrets: []
+
+service:
+  type: ClusterIP
+  port: 9091
+  targetPort: 9091
+  # nodePort: 32100
+  portName: http
+
+  # Optional - Can be used for headless if value is "None"
+  clusterIP: ""
+
+  ipDualStack:
+    enabled: false
+    ipFamilies: ["IPv6", "IPv4"]
+    ipFamilyPolicy: "PreferDualStack"
+
+  loadBalancerIP: ""
+  loadBalancerSourceRanges: []
+
+# Whether to automatically mount a service account token into the pod
+automountServiceAccountToken: true
+
+# Optional deployment annotations
+deploymentAnnotations: {}
+
+# Optional pod annotations
+podAnnotations: {}
+
+# Optional pod labels
+podLabels: {}
+
+# Optional service annotations
+serviceAnnotations: {}
+
+# Optional service labels
+serviceLabels: {}
+
+# Optional serviceAccount labels
+serviceAccountLabels: {}
+
+# Optional persistentVolume labels
+persistentVolumeLabels: {}
+
+# Optional additional environment variables
+extraVars: []
+
+## Additional pushgateway container arguments
+##
+## example:
+## extraArgs:
+##   - --persistence.file=/data/pushgateway.data
+##   - --persistence.interval=5m
+extraArgs: []
+
+## Additional InitContainers to initialize the pod
+##
+extraInitContainers: []
+
+# Optional additional containers (sidecar)
+extraContainers: []
+  # - name: oauth2-proxy
+  #   args:
+  #     - -https-address=:9092
+  #     - -upstream=http://localhost:9091
+  #     - -skip-auth-regex=^/metrics
+  #     - -openshift-delegate-urls={"/":{"group":"monitoring.coreos.com","resource":"prometheuses","verb":"get"}}
+  #   image: openshift/oauth-proxy:v1.1.0
+  #   ports:
+  #     - containerPort: 9092
+  #       name: proxy
+  #   resources:
+  #     limits:
+  #       memory: 16Mi
+  #     requests:
+  #       memory: 4Mi
+  #       cpu: 20m
+  #   volumeMounts:
+  #     - mountPath: /etc/prometheus/secrets/pushgateway-tls
+  #       name: secret-pushgateway-tls
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 200m
+  #   memory: 50Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 30Mi
+
+# -- Sets web configuration
+# To enable basic authentication, provide basicAuthUsers as a map
+# If serviceMonitor.enabled is set, a secret with these credentials will be created
+# and configured in serviceMonitor. serviceMonitor.basicAuth overrides this secret.
+webConfiguration: {}
+  # basicAuthUsers:
+  #   username: password
+
+liveness:
+  enabled: true
+  probe:
+    httpGet:
+      path: /-/healthy
+      port: 9091
+    initialDelaySeconds: 10
+    timeoutSeconds: 10
+
+readiness:
+  enabled: true
+  probe:
+    httpGet:
+      path: /-/ready
+      port: 9091
+    initialDelaySeconds: 10
+    timeoutSeconds: 10
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name:
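+  # A minimal sketch of the behaviour described above (the account name is an
+  # assumption, not a chart default): reuse a pre-created ServiceAccount
+  # instead of generating one.
+  # create: false
+  # name: existing-pushgateway-sa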
+
+## Configure the ingress resource that allows you to access the
+## pushgateway installation. Set up the URL.
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## Enable Ingress.
+  ##
+  enabled: false
+  # AWS ALB requires path of /*
+  className: ""
+  path: /
+  pathType: ImplementationSpecific
+
+  ## Extra paths to prepend to every host configuration. This is useful when working with annotation-based services.
+  extraPaths: []
+  # - path: /*
+  #   backend:
+  #     serviceName: ssl-redirect
+  #     servicePort: use-annotation
+
+  ## Annotations.
+  ##
+  # annotations:
+  #   kubernetes.io/ingress.class: nginx
+  #   kubernetes.io/tls-acme: 'true'
+
+  ## Hostnames.
+  ## Must be provided if Ingress is enabled.
+  ##
+  # hosts:
+  #   - pushgateway.domain.com
+
+  ## TLS configuration.
+  ## Secrets must be manually created in the namespace.
+  ##
+  # tls:
+  #   - secretName: pushgateway-tls
+  #     hosts:
+  #       - pushgateway.domain.com
+
+tolerations: []
+  # - effect: NoSchedule
+  #   operator: Exists
+
+## Node labels for pushgateway pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+replicaCount: 1
+
+hostAliases: []
+  # - ip: "127.0.0.1"
+  #   hostnames:
+  #     - "foo.local"
+  #     - "bar.local"
+  # - ip: "10.1.2.3"
+  #   hostnames:
+  #     - "foo.remote"
+  #     - "bar.remote"
+
+## When running more than one replica together with persistence, each replica needs its own
+## volume, since sharing a `persistence.file` across replicas does not keep metrics synced.
+## For this purpose, you can enable `runAsStatefulSet` to deploy the pushgateway as a
+## StatefulSet instead of as a Deployment.
+runAsStatefulSet: false
+
+## Security context to be added to push-gateway pods
+##
+securityContext:
+  fsGroup: 65534
+  runAsUser: 65534
+  runAsNonRoot: true
+
+## Security context to be added to push-gateway containers
+## This is a separate variable because the securityContext schema differs between pods and containers.
+containerSecurityContext: {}
+#   allowPrivilegeEscalation: false
+#   readOnlyRootFilesystem: true
+#   runAsUser: 65534
+#   runAsNonRoot: true
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+## Pod anti-affinity can prevent the scheduler from placing pushgateway replicas on the same node.
+## The value "soft" means that the scheduler should *prefer* not to schedule two replica pods onto the same node, but no guarantee is provided.
+## The value "hard" means that the scheduler is *required* not to schedule two replica pods onto the same node.
+## The default value "" disables pod anti-affinity, so no anti-affinity rules will be configured (unless set in `affinity`).
+##
+podAntiAffinity: ""
+
+## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
+## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+##
+podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+## Topology spread constraints for pods
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+topologySpreadConstraints: []
+
+# Enable this if you're using https://github.com/coreos/prometheus-operator
+serviceMonitor:
+  enabled: false
+  namespace: monitoring
+
+  # telemetryPath: HTTP resource path from which to fetch metrics.
+  # The telemetry path (default /metrics) has to be prefixed accordingly if the
+  # pushgateway sets a route prefix at start-up.
+  #
+  telemetryPath: "/metrics"
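+  # Sketch of the prefix interplay described above (the prefix itself is an
+  # assumption): if the pushgateway starts with --web.route-prefix=/pushgateway,
+  # the scrape path becomes:
+  # telemetryPath: "/pushgateway/metrics"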
+
+  # Fallback to the prometheus default unless specified
+  interval: ""
+
+  ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+  scheme: ""
+
+  ## Basic authentication
+  basicAuth: {}
+
+  ## Bearer token file
+  bearerTokenFile: ""
+
+  ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+  ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
+  tlsConfig: {}
+
+  # Fallback to the prometheus default unless specified
+  scrapeTimeout: ""
+
+  ## Labels that the Prometheus Operator installed in your cluster uses to select
+  ## the ServiceMonitors it should work with
+  ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+  additionalLabels: {}
+
+  # Retain the job and instance labels of the metrics pushed to the Pushgateway
+  # [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
+  honorLabels: true
+
+  ## Metric relabel configs to apply to samples before ingestion.
+  ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
+  metricRelabelings: []
+  # - action: keep
+  #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+  #   sourceLabels: [__name__]
+
+  ## Relabel configs to apply to samples before ingestion.
+  ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
+  relabelings: []
+  # - sourceLabels: [__meta_kubernetes_pod_node_name]
+  #   separator: ;
+  #   regex: ^(.*)$
+  #   targetLabel: nodename
+  #   replacement: $1
+  #   action: replace
+
+# The values to set in the PodDisruptionBudget spec (minAvailable/maxUnavailable)
+# If not set then a PodDisruptionBudget will not be created
+podDisruptionBudget: {}
+
+priorityClassName:
+
+# Deployment Strategy type
+strategy:
+  type: Recreate
+
+persistentVolume:
+  ## If true, pushgateway will create/use a Persistent Volume Claim
+  ## If false, use emptyDir
+  ##
+  enabled: false
+
+  ## pushgateway data Persistent Volume access modes
+  ## Must match those of existing PV or dynamic provisioner
+  ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  accessModes:
+    - ReadWriteOnce
+
+  ## pushgateway data Persistent Volume Claim annotations
+  ##
+  annotations: {}
+
+  ## pushgateway data Persistent Volume existing claim name
+  ## Requires pushgateway.persistentVolume.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  existingClaim: ""
+
+  ## pushgateway data Persistent Volume mount root path
+  ##
+  mountPath: /data
+
+  ## pushgateway data Persistent Volume size
+  ##
+  size: 2Gi
+
+  ## pushgateway data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ## set, choosing the default provisioner (gp2 on AWS, standard on
+  ## GKE, AWS & OpenStack).
+  ##
+  # storageClass: "-"
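+  # Example (the class name is an assumption): request a specific StorageClass
+  # instead of the cluster default provisioner:
+  # storageClass: "fast-ssd"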
+
+  ## Subdirectory of pushgateway data Persistent Volume to mount
+  ## Useful if the volume's root directory is not empty
+  ##
+  subPath: ""
+
+extraVolumes: []
+  # - name: extra
+  #   emptyDir: {}
+extraVolumeMounts: []
+  # - name: extra
+  #   mountPath: /usr/share/extras
+  #   readOnly: true
+
+# Configuration for clusters with restrictive network policies in place:
+# - allowAll allows access to the PushGateway from any namespace
+# - customSelectors is a list of pod/namespaceSelectors to allow access from
+# These options are mutually exclusive; if both are set, customSelectors takes precedence.
+networkPolicy: {}
+  # allowAll: true
+  # customSelectors:
+  #   - namespaceSelector:
+  #       matchLabels:
+  #         type: admin
+  #   - podSelector:
+  #       matchLabels:
+  #         app: myapp
+
+# Array of extra K8s objects to deploy (evaluated as a template)
+# The value can hold an array of strings as well as objects
+extraManifests: []
+
+# Lifecycle hooks configuration
+lifecycle: {}
+#   preStop:
+#     exec:
+#       command: ["/bin/sh", "-c", "sleep 30"]
diff --git a/opencloud/charts/prometheus/templates/NOTES.txt b/opencloud/charts/prometheus/templates/NOTES.txt
new file mode 100644
index 0000000..b0401f6
--- /dev/null
+++ b/opencloud/charts/prometheus/templates/NOTES.txt
@@ -0,0 +1,118 @@
+The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.server.ingress.enabled -}}
+For access from outside the cluster, the server URL(s) are:
+{{- range .Values.server.ingress.hosts }}
+http://{{ tpl . $ }}
+{{- end }}
+{{- else if .Values.server.route.main.enabled }}
+For access from outside the cluster, the server URL(s) are:
+{{- range .Values.server.route.main.hostnames }}
+http://{{ tpl . $ }}
+{{- end }}
+{{- else }}
+Get the Prometheus server URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.server.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.server.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }}
+{{- else if contains "ClusterIP" .Values.server.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ include "prometheus.port" . }}
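+  Then open http://127.0.0.1:{{ include "prometheus.port" . }} in your browser.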
+{{- end }}
+
+
+{{- if .Values.server.persistentVolume.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the Server pod is terminated.                             #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{ if .Values.alertmanager.enabled }}
+The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.port }} on the following DNS name from within your cluster:
+{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.alertmanager.ingress.enabled -}}
+From outside the cluster, the alertmanager URL(s) are:
+{{- range .Values.alertmanager.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Alertmanager URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.alertmanager.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.alertmanager.service.port }}
+{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" .Subcharts.alertmanager }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093
+{{- end }}
+{{- end }}
+
+{{- if .Values.alertmanager.persistence.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the AlertManager pod is terminated.                       #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{- if (index .Values "prometheus-node-exporter" "enabled") }}
+#################################################################################
+######   WARNING: Pod Security Policy has been disabled by default since    #####
+######            it was deprecated in k8s 1.25+. Use                       #####
+######            (index .Values "prometheus-node-exporter" "rbac"          #####
+######            "pspEnabled") with (index .Values                         #####
+######            "prometheus-node-exporter" "rbac" "pspAnnotations")       #####
+######            in case you still need it.                                #####
+#################################################################################
+{{- end }}
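+{{/* Illustration of the values shape referenced in the warning above (the
+     boolean shown is an assumption, not a shipped default):
+       prometheus-node-exporter:
+         rbac:
+           pspEnabled: false
+           pspAnnotations: {}
+*/}}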
+
+{{ if (index .Values "prometheus-pushgateway" "enabled") }}
+The Prometheus PushGateway can be accessed via port {{ index .Values "prometheus-pushgateway" "service" "port" }} on the following DNS name from within your cluster:
+{{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if (index .Values "prometheus-pushgateway" "ingress" "enabled") -}}
+From outside the cluster, the pushgateway URL(s) are:
+{{- range (index .Values "prometheus-pushgateway" "ingress" "hosts") }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the PushGateway URL by running these commands in the same shell:
+{{- $pushgateway_svc_type := index .Values "prometheus-pushgateway" "service" "type" -}}
+{{- if contains "NodePort" $pushgateway_svc_type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" $pushgateway_svc_type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ index .Values "prometheus-pushgateway" "service" "port" }}
+{{- else if contains "ClusterIP" $pushgateway_svc_type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus-pushgateway.name" (index .Subcharts "prometheus-pushgateway") }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091
+{{- end }}
+{{- end }}
+{{- end }}
+
+For more information on running Prometheus, visit:
+https://prometheus.io/
diff --git a/opencloud/charts/prometheus/templates/_helpers.tpl b/opencloud/charts/prometheus/templates/_helpers.tpl
new file mode 100644
index 0000000..29c89a1
--- /dev/null
+++ b/opencloud/charts/prometheus/templates/_helpers.tpl
@@ -0,0 +1,180 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the port for prometheus.
+*/}}
+{{- define "prometheus.port" -}}
+9090
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
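+{{/* Illustration (the version is an assumption): for chart "prometheus" at
+     version 25.0.0, "prometheus.chart" renders as "prometheus-25.0.0". */}}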
+
+{{/*
+Create labels for prometheus
+*/}}
+{{- define "prometheus.common.matchLabels" -}}
+app.kubernetes.io/name: {{ include "prometheus.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create unified labels for prometheus components
+*/}}
+{{- define "prometheus.common.metaLabels" -}}
+app.kubernetes.io/version: {{ .Chart.AppVersion }}
+helm.sh/chart: {{ include "prometheus.chart" . }}
+app.kubernetes.io/part-of: {{ include "prometheus.name" . }}
+{{- with .Values.commonMetaLabels }}
+{{ toYaml . }}
+{{- end }}
+{{- end -}}
+
+{{- define "prometheus.server.labels" -}}
+{{ include "prometheus.server.matchLabels" . }}
+{{ include "prometheus.common.metaLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.server.matchLabels" -}}
+app.kubernetes.io/component: {{ .Values.server.name }}
+{{ include "prometheus.common.matchLabels" . }}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified ClusterRole name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.clusterRoleName" -}}
+{{- if .Values.server.clusterRoleNameOverride -}}
+{{ .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" }}
+{{- else -}}
+{{ include "prometheus.server.fullname" . }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified alertmanager name, and check that the `alertmanager`
+subchart exists before NOTES.txt tries to show it to the user.
+*/}}
+{{- define "prometheus.alertmanager.fullname" -}}
+{{- if .Subcharts.alertmanager -}}
+{{- template "alertmanager.fullname" .Subcharts.alertmanager -}}
+{{- else -}}
+{{- "alertmanager not found" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified Prometheus server name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.server.fullname" -}}
+{{- if .Values.server.fullnameOverride -}}
+{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
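+{{/* Worked example (release names are assumptions): with release "metrics" and
+     server.name "server", this renders "metrics-prometheus-server"; a release
+     named "prometheus" collapses to "prometheus-server". */}}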
+
+{{/*
+Get KubeVersion removing pre-release information.
+*/}}
+{{- define "prometheus.kubeVersion" -}}
+  {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "prometheus.networkPolicy.apiVersion" -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the server component
+*/}}
+{{- define "prometheus.serviceAccountName.server" -}}
+{{- if .Values.serviceAccounts.server.create -}}
+    {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccounts.server.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Define the prometheus.namespace template: use forceNamespace if set, otherwise .Release.Namespace
+*/}}
+{{- define "prometheus.namespace" -}}
+  {{- default .Release.Namespace .Values.forceNamespace -}}
+{{- end }}
+
+{{/*
+Define template prometheus.namespaces producing a list of namespaces to monitor
+*/}}
+{{- define "prometheus.namespaces" -}}
+{{- $namespaces := list }}
+{{- if and .Values.rbac.create .Values.server.useExistingClusterRoleName }}
+  {{- if .Values.server.namespaces -}}
+    {{- range $ns := join "," .Values.server.namespaces | split "," }}
+      {{- $namespaces = append $namespaces (tpl $ns $) }}
+    {{- end -}}
+  {{- end -}}
+  {{- if .Values.server.releaseNamespace -}}
+    {{- $namespaces = append $namespaces (include "prometheus.namespace" .) }}
+  {{- end -}}
+{{- end -}}
+{{ mustToJson $namespaces }}
+{{- end -}}
+
+{{/*
+Define prometheus.server.remoteWrite producing a list of remoteWrite configurations with URL templating
+*/}}
+{{- define "prometheus.server.remoteWrite" -}}
+{{- $remoteWrites := list }}
+{{- range $remoteWrite := .Values.server.remoteWrite }}
+  {{- $remoteWrites = tpl $remoteWrite.url $ | set $remoteWrite "url" | append $remoteWrites }}
+{{- end -}}
+{{ toYaml $remoteWrites }}
+{{- end -}}
+
+{{/*
+Define prometheus.server.remoteRead producing a list of remoteRead configurations with URL templating
+*/}}
+{{- define "prometheus.server.remoteRead" -}}
+{{- $remoteReads := list }}
+{{- range $remoteRead := .Values.server.remoteRead }}
+  {{- $remoteReads = tpl $remoteRead.url $ | set $remoteRead "url" | append $remoteReads }}
+{{- end -}}
+{{ toYaml $remoteReads }}
+{{- end -}}
diff --git a/opencloud/charts/prometheus/templates/clusterrole.yaml b/opencloud/charts/prometheus/templates/clusterrole.yaml
new file mode 100644
index 0000000..eb6df22
--- /dev/null
+++ b/opencloud/charts/prometheus/templates/clusterrole.yaml
@@ -0,0 +1,45 @@
+{{- if and .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ include "prometheus.clusterRoleName" . }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - services
+      - endpoints
+      - pods
+      - ingresses
+      - configmaps
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "networking.k8s.io"
+    resources:
+      - ingresses/status
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "discovery.k8s.io"
+    resources:
+      - endpointslices
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - "/metrics"
+    verbs:
+      - get
+{{- end }}
diff --git a/opencloud/charts/prometheus/templates/clusterrolebinding.yaml b/opencloud/charts/prometheus/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000..5050a9e
--- /dev/null
+++ b/opencloud/charts/prometheus/templates/clusterrolebinding.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ include "prometheus.clusterRoleName" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "prometheus.serviceAccountName.server" .
}} + namespace: {{ include "prometheus.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "prometheus.clusterRoleName" . }} +{{- end }} diff --git a/opencloud/charts/prometheus/templates/cm.yaml b/opencloud/charts/prometheus/templates/cm.yaml new file mode 100644 index 0000000..f150657 --- /dev/null +++ b/opencloud/charts/prometheus/templates/cm.yaml @@ -0,0 +1,107 @@ +{{- if and (empty .Values.server.configMapOverrideName) (empty .Values.server.configFromSecret) -}} +apiVersion: v1 +kind: ConfigMap +metadata: +{{- with .Values.server.configMapAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.extraConfigmapLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +data: + allow-snippet-annotations: "false" +{{- $root := . -}} +{{- range $key, $value := .Values.ruleFiles }} + {{ $key }}: {{- toYaml $value | indent 2 }} +{{- end }} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{- include "prometheus.server.remoteWrite" $root | nindent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{- include "prometheus.server.remoteRead" $root | nindent 4 }} +{{- end }} +{{- if or $root.Values.server.tsdb $root.Values.server.exemplars }} + storage: +{{- if $root.Values.server.tsdb }} + tsdb: +{{ $root.Values.server.tsdb | toYaml | indent 8 }} +{{- end }} +{{- if $root.Values.server.exemplars }} + exemplars: +{{ $root.Values.server.exemplars | toYaml | indent 8 }} +{{- end }} +{{- end }} +{{- if $root.Values.server.otlp }} + otlp: +{{ $root.Values.server.otlp | toYaml | indent 8 }} +{{- end }} +{{- if $root.Values.scrapeConfigFiles }} + scrape_config_files: +{{ toYaml $root.Values.scrapeConfigFiles | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + 
regex: {{ $root.Release.Name }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name] + regex: {{ default "alertmanager" $root.Values.alertmanager.nameOverride | trunc 63 | trimSuffix "-" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/templates/deploy.yaml b/opencloud/charts/prometheus/templates/deploy.yaml new file mode 100644 index 0000000..79bdb7d --- /dev/null +++ b/opencloud/charts/prometheus/templates/deploy.yaml @@ -0,0 +1,488 @@ +apiVersion: apps/v1 +{{- if .Values.server.statefulSet.enabled }} +kind: StatefulSet +metadata: + {{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.statefulSet.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- else if .Values.server.daemonSet.enabled }} +kind: DaemonSet +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.daemonSet.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- else }} +kind: Deployment +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.deploymentAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: + {{- if .Values.server.statefulSet.enabled }} + {{- if semverCompare ">= 1.27.x" (include "prometheus.kubeVersion" .) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsDelete }} + whenScaled: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsScale }} + {{- end }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + serviceName: {{ template "prometheus.server.fullname" . }}-headless + {{- with .Values.server.statefulSet.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else if .Values.server.daemonSet.enabled }} + {{- with .Values.server.daemonSet.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + {{- with .Values.server.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{ if eq .type "Recreate" }}rollingUpdate: null{{ end }} + {{- end }} + {{- end }} + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + {{- if not .Values.server.daemonSet.enabled }} + replicas: {{ .Values.server.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.runtimeClassName }} + runtimeClassName: "{{ .Values.server.runtimeClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} +{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} + automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} +{{- end }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + {{- if .Values.configmapReload.prometheus.image.digest }} + image: "{{ tpl .Values.configmapReload.prometheus.image.repository . }}@{{ tpl .Values.configmapReload.prometheus.image.digest . }}" + {{- else }} + image: "{{ tpl .Values.configmapReload.prometheus.image.repository . }}:{{ tpl .Values.configmapReload.prometheus.image.tag . }}" + {{- end }} + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + {{- with .Values.configmapReload.prometheus.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - --watched-dir=/etc/config + {{- $default_url := (printf "http://127.0.0.1:%s/-/reload" (include "prometheus.port" .)) }} + {{- with .Values.server.prefixURL }} + {{- $default_url = printf "http://127.0.0.1:%s%s/-/reload" (include "prometheus.port" .) . }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + - --listen-address=0.0.0.0:{{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --watched-dir={{ . }} + {{- end }} + {{- with .Values.configmapReload.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- if .Values.configmapReload.prometheus.containerPortName }} + name: {{ .Values.configmapReload.prometheus.containerPortName }} + {{- end }} + {{- end }} + {{- with .Values.configmapReload.prometheus.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.configmapReload.prometheus.readinessProbe }} + readinessProbe: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if .Values.configmapReload.prometheus.startupProbe.enabled }} + {{- $startupProbe := omit .Values.configmapReload.prometheus.startupProbe "enabled" }} + startupProbe: + {{- toYaml $startupProbe | nindent 12 }} + {{- end }} + {{- with .Values.configmapReload.prometheus.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- with .Values.configmapReload.prometheus.extraVolumeMounts }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + {{- if .Values.server.image.digest }} + image: "{{ tpl .Values.server.image.repository . }}@{{ tpl .Values.server.image.digest . }}" + {{- else }} + image: "{{ tpl .Values.server.image.repository . }}:{{ tpl .Values.server.image.tag . | default .Chart.AppVersion}}" + {{- end }} + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- with .Values.server.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + {{- if .Values.server.retentionSize }} + - --storage.tsdb.retention.size={{ .Values.server.retentionSize }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: {{ include "prometheus.port" . }} + {{- if .Values.server.portName }} + name: {{ .Values.server.portName }} + {{- end }} + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: {{ default (include "prometheus.port" .) .Values.server.portName }} + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . | nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: {{ default (include "prometheus.port" .) 
.Values.server.portName }} + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: {{ default (include "prometheus.port" .) .Values.server.portName }} + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . | nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: {{ default (include "prometheus.port" .) .Values.server.portName }} + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: {{ default (include "prometheus.port" .) .Values.server.portName }} + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: {{ default (include "prometheus.port" .) .Values.server.portName }} + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + {{- with .Values.server.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- $storageVolumeName := ternary .Values.server.persistentVolume.statefulSetNameOverride "storage-volume" (and .Values.server.persistentVolume.enabled .Values.server.statefulSet.enabled (not (empty .Values.server.persistentVolume.statefulSetNameOverride))) }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: {{ $storageVolumeName }} + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.server.hostNetwork }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + {{- else }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- with .Values.server.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if or .Values.server.affinity .Values.server.podAntiAffinity }} + affinity: + {{- end }} + {{- with .Values.server.affinity }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if eq .Values.server.podAntiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.server.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ template "prometheus.name" . }}]} + {{- else if eq .Values.server.podAntiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.server.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ template "prometheus.name" . }}]} + {{- end }} + {{- with .Values.server.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . 
}} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + {{- if and .Values.server.persistentVolume.enabled (not .Values.server.statefulSet.enabled) }} + - name: {{ $storageVolumeName }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else if not .Values.server.persistentVolume.enabled }} + - name: {{ $storageVolumeName }} + emptyDir: + {{- if or .Values.server.emptyDir.sizeLimit .Values.server.emptyDir.medium }} + {{- if .Values.server.emptyDir.medium }} + medium: {{ .Values.server.emptyDir.medium }} + {{- end }} + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- end }} + {{- else }} + {} + {{- end -}} + {{- end -}} + {{- if and .Values.server.statefulSet.enabled .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: {{ $storageVolumeName }} + {{- with .Values.server.persistentVolume.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.server.persistentVolume.labels }} + labels: + {{- toYaml . | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- toYaml .Values.server.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/opencloud/charts/prometheus/templates/extra-manifests.yaml b/opencloud/charts/prometheus/templates/extra-manifests.yaml new file mode 100644 index 0000000..2b21b71 --- /dev/null +++ b/opencloud/charts/prometheus/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl . $ }} +{{ end }} diff --git a/opencloud/charts/prometheus/templates/headless-svc.yaml b/opencloud/charts/prometheus/templates/headless-svc.yaml new file mode 100644 index 0000000..1fe4328 --- /dev/null +++ b/opencloud/charts/prometheus/templates/headless-svc.yaml @@ -0,0 +1,32 @@ +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless + namespace: {{ include "prometheus.namespace" . }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: {{ default (include "prometheus.port" .) .Values.server.portName }} + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/opencloud/charts/prometheus/templates/httproute.yaml b/opencloud/charts/prometheus/templates/httproute.yaml new file mode 100644 index 0000000..9ee4383 --- /dev/null +++ b/opencloud/charts/prometheus/templates/httproute.yaml @@ -0,0 +1,45 @@ +{{- range $name, $route := .Values.server.route }} +{{- if $route.enabled }} +--- +apiVersion: {{ $route.apiVersion | default "gateway.networking.k8s.io/v1" }} +kind: {{ $route.kind | default "HTTPRoute" }} +metadata: + {{- with $route.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus.server.fullname" $ }} + namespace: {{ include "prometheus.namespace" $ }} + labels: {{ include "prometheus.server.labels" $ | nindent 4 }} + {{- with $route.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with $route.parentRefs }} + parentRefs: {{ toYaml . | nindent 4 }} + {{- end }} + {{- with $route.hostnames }} + hostnames: {{ tpl (toYaml .) $ | nindent 4 }} + {{- end }} + rules: + {{- with $route.additionalRules }} + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} + {{- if $route.httpsRedirect }} + - filters: + - type: RequestRedirect + requestRedirect: + scheme: https + statusCode: 301 + {{- else }} + - backendRefs: + - name: {{ include "prometheus.server.fullname" $ }} + port: {{ $.Values.server.service.servicePort }} + {{- with $route.filters }} + filters: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with $route.matches }} + matches: {{ toYaml . | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/templates/ingress.yaml b/opencloud/charts/prometheus/templates/ingress.yaml new file mode 100644 index 0000000..f5613d9 --- /dev/null +++ b/opencloud/charts/prometheus/templates/ingress.yaml @@ -0,0 +1,47 @@ +{{- if .Values.server.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.ingress.servicePort | default .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: + {{- if .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . 
}} + - host: {{ tpl (first $url) $ }} + http: + paths: +{{ if $extraPaths }} +{{ tpl (toYaml $extraPaths | indent 10) $ }} +{{- end }} + - path: {{ tpl ($ingressPath) $ }} + pathType: {{ $ingressPathType }} + backend: + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ tpl (toYaml .Values.server.ingress.tls | indent 4) $ }} + {{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/templates/network-policy.yaml b/opencloud/charts/prometheus/templates/network-policy.yaml new file mode 100644 index 0000000..0accbf5 --- /dev/null +++ b/opencloud/charts/prometheus/templates/network-policy.yaml @@ -0,0 +1,16 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: {{ default (include "prometheus.port" .) .Values.server.portName }} +{{- end }} diff --git a/opencloud/charts/prometheus/templates/pdb.yaml b/opencloud/charts/prometheus/templates/pdb.yaml new file mode 100644 index 0000000..db16acd --- /dev/null +++ b/opencloud/charts/prometheus/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +{{- $pdbSpec := omit .Values.server.podDisruptionBudget "enabled" }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + {{- if not (or (hasKey $pdbSpec "minAvailable") (hasKey $pdbSpec "maxUnavailable")) }} + maxUnavailable: 1 + {{- end }} + {{- if hasKey $pdbSpec "minAvailable" }} + minAvailable: {{ $pdbSpec.minAvailable }} + {{- end }} + {{- if hasKey $pdbSpec "maxUnavailable" }} + maxUnavailable: {{ $pdbSpec.maxUnavailable }} + {{- end }} + {{- if hasKey $pdbSpec "unhealthyPodEvictionPolicy" }} + unhealthyPodEvictionPolicy: {{ $pdbSpec.unhealthyPodEvictionPolicy }} + {{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/templates/pvc.yaml b/opencloud/charts/prometheus/templates/pvc.yaml new file mode 100644 index 0000000..d2bc403 --- /dev/null +++ b/opencloud/charts/prometheus/templates/pvc.yaml @@ -0,0 +1,40 @@ +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.persistentVolume.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . 
}} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- if .Values.server.persistentVolume.selector }} + selector: + {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- if .Values.server.persistentVolume.volumeName }} + volumeName: "{{ .Values.server.persistentVolume.volumeName }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/opencloud/charts/prometheus/templates/rolebinding.yaml b/opencloud/charts/prometheus/templates/rolebinding.yaml new file mode 100644 index 0000000..b7719dd --- /dev/null +++ b/opencloud/charts/prometheus/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- range include "prometheus.namespaces" . | fromJsonArray }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} + namespace: {{ include "prometheus.namespace" $ }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} diff --git a/opencloud/charts/prometheus/templates/service.yaml b/opencloud/charts/prometheus/templates/service.yaml new file mode 100644 index 0000000..37ddb86 --- /dev/null +++ b/opencloud/charts/prometheus/templates/service.yaml @@ -0,0 +1,66 @@ +{{- if .Values.server.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} +{{- if .Values.server.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.server.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: {{ default (include "prometheus.port" .) 
.Values.server.portName }} + {{- if ( and (eq .Values.server.service.type "NodePort" ) (not (empty .Values.server.service.nodePort)) ) }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if ( and (eq .Values.server.service.gRPC.type "NodePort" ) (not (empty .Values.server.service.gRPC.nodePort)) ) }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} +{{- if .Values.server.service.additionalPorts }} +{{ toYaml .Values.server.service.additionalPorts | indent 4 }} +{{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/opencloud/charts/prometheus/templates/serviceaccount.yaml b/opencloud/charts/prometheus/templates/serviceaccount.yaml new file mode 100644 index 0000000..6d5ab0c --- /dev/null +++ b/opencloud/charts/prometheus/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} + namespace: {{ include "prometheus.namespace" . }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} +automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} +{{- else if kindIs "bool" .Values.serviceAccounts.server.automountServiceAccountToken }} +automountServiceAccountToken: {{ .Values.serviceAccounts.server.automountServiceAccountToken }} +{{- end }} +{{- end }} diff --git a/opencloud/charts/prometheus/templates/vpa.yaml b/opencloud/charts/prometheus/templates/vpa.yaml new file mode 100644 index 0000000..9bf26e9 --- /dev/null +++ b/opencloud/charts/prometheus/templates/vpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "prometheus.server.fullname" . }}-vpa + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . 
}} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} diff --git a/opencloud/charts/prometheus/values.schema.json b/opencloud/charts/prometheus/values.schema.json new file mode 100644 index 0000000..3e98792 --- /dev/null +++ b/opencloud/charts/prometheus/values.schema.json @@ -0,0 +1,739 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "alertRelabelConfigs": { + "type": "object" + }, + "alertmanager": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "podSecurityContext": { + "type": "object", + "properties": { + "fsGroup": { + "type": "integer" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + } + } + } + }, + "configmapReload": { + "type": "object", + "properties": { + "env": { + "type": "array" + }, + "prometheus": { + "type": "object", + "properties": { + "containerSecurityContext": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "extraArgs": { + "type": "object" + }, + "extraConfigmapMounts": { + "type": "array" + }, + "extraVolumeDirs": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, + "image": { + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "name": { + "type": "string" + }, + "resources": { + "type": "object" + } + } + }, + "reloadUrl": { + "type": "string" + } + } + }, + "extraManifests": { + "type": "array" + }, + "extraScrapeConfigs": { + "type": "string" + }, + "forceNamespace": { + "type": "string" + }, + "imagePullSecrets": { + "type": "array" + }, + "kube-state-metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "networkPolicy": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "prometheus-node-exporter": { + "type": "object", + "properties": { + "containerSecurityContext": { + "type": "object", + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + } + } + }, + "enabled": { + "type": "boolean" + }, + "rbac": { + "type": "object", + "properties": { + "pspEnabled": { + "type": "boolean" + } + } + } + } + }, + "prometheus-pushgateway": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "serviceAnnotations": { + "type": "object", + "properties": { + "prometheus.io/probe": { + "type": "string" + } + } + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "ruleFiles": { + "type": "object" + }, + "server": { + "type": "object", + "properties": { + "affinity": { + "type": "object" + }, + "alertmanagers": { + "type": "array" + }, + "baseURL": { + "type": "string" + }, + "clusterRoleNameOverride": { + "type": "string" + }, + "command": { + "type": "array" + }, + "configMapAnnotations": { + "type": "object" + }, + "configMapOverrideName": { + "type": "string" + }, + "configPath": { + "type": "string" + }, + "containerSecurityContext": { + "type": "object" + }, + "defaultFlagsOverride": { + "type": "array" + }, + 
"deploymentAnnotations": { + "type": "object" + }, + "dnsConfig": { + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "emptyDir": { + "type": "object", + "properties": { + "sizeLimit": { + "type": "string" + } + } + }, + "enableServiceLinks": { + "type": "boolean" + }, + "env": { + "type": "array" + }, + "exemplars": { + "type": "object" + }, + "extraArgs": { + "type": "object" + }, + "extraConfigmapLabels": { + "type": "object" + }, + "extraConfigmapMounts": { + "type": "array" + }, + "extraFlags": { + "type": "array", + "items": { + "type": "string" + } + }, + "extraHostPathMounts": { + "type": "array" + }, + "extraInitContainers": { + "type": "array" + }, + "extraSecretMounts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, + "extraVolumes": { + "type": "array" + }, + "fullnameOverride": { + "type": "string" + }, + "global": { + "type": "object", + "properties": { + "evaluation_interval": { + "type": "string" + }, + "scrape_interval": { + "type": "string" + }, + "scrape_timeout": { + "type": "string" + } + } + }, + "hostAliases": { + "type": "array" + }, + "hostNetwork": { + "type": "boolean" + }, + "image": { + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "extraLabels": { + "type": "object" + }, + "extraPaths": { + "type": "array" + }, + "hosts": { + "type": "array" + }, + "path": { + "type": "string" + }, + "pathType": { + "type": "string" + }, + "tls": { + "type": "array" + } + } + }, + "livenessProbeFailureThreshold": { + "type": "integer" + }, + "livenessProbeInitialDelay": { + "type": "integer" + }, + "livenessProbePeriodSeconds": { + "type": "integer" + }, + "livenessProbeSuccessThreshold": { + "type": "integer" + }, + "livenessProbeTimeout": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "persistentVolume": { + "type": "object", + "properties": { + "accessModes": { + "type": "array", + "items": { + "type": "string" + } + }, + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "existingClaim": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "mountPath": { + "type": "string" + }, + "size": { + "type": "string" + }, + "statefulSetNameOverride": { + "type": "string" + }, + "subPath": { + "type": "string" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podAntiAffinity": { + "type": "string", + "enum": ["", "soft", "hard"], + "default": "" + }, + "podAntiAffinityTopologyKey": { + "type": "string" + }, + "podDisruptionBudget": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "string", + "integer" + ] + } + } + }, + "podLabels": { + "type": "object" + }, + "portName": { + "type": "string" + }, + "prefixURL": { + "type": "string" + }, + "priorityClassName": { + "type": "string" + }, + "runtimeClassName": { + "type": "string" + }, + "probeHeaders": { + "type": "array" + }, + "probeScheme": { + "type": "string" + }, + "readinessProbeFailureThreshold": { + "type": "integer" + }, + "readinessProbeInitialDelay": { + "type": "integer" + }, + "readinessProbePeriodSeconds": { + "type": "integer" + }, + "readinessProbeSuccessThreshold": { + "type": "integer" + }, + 
"readinessProbeTimeout": { + "type": "integer" + }, + "releaseNamespace": { + "type": "boolean" + }, + "remoteRead": { + "type": "array" + }, + "remoteWrite": { + "type": "array" + }, + "replicaCount": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "retention": { + "type": "string" + }, + "retentionSize": { + "type": "string" + }, + "revisionHistoryLimit": { + "type": "integer" + }, + "securityContext": { + "type": "object", + "properties": { + "fsGroup": { + "type": "integer" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + } + }, + "service": { + "type": "object", + "properties": { + "additionalPorts": { + "type": "array" + }, + "annotations": { + "type": "object" + }, + "clusterIP": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "externalIPs": { + "type": "array" + }, + "gRPC": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "servicePort": { + "type": "integer" + } + } + }, + "labels": { + "type": "object" + }, + "loadBalancerIP": { + "type": "string" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "servicePort": { + "type": "integer" + }, + "sessionAffinity": { + "type": "string" + }, + "statefulsetReplica": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "replica": { + "type": "integer" + } + } + }, + "type": { + "type": "string" + } + } + }, + "sidecarContainers": { + "type": "object" + }, + "sidecarTemplateValues": { + "type": "object" + }, + "startupProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "statefulSet": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "headless": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "gRPC": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "servicePort": { + "type": "integer" + } + } + }, + "labels": { + "type": "object" + }, + "servicePort": { + "type": "integer" + } + } + }, + "labels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "pvcDeleteOnStsDelete": { + "type": "boolean" + }, + "pvcDeleteOnStsScale": { + "type": "boolean" + } + } + }, + "storagePath": { + "type": "string" + }, + "strategy": { + "type": "object", + "properties": { + "type": { + "type": "string" + } + } + }, + "tcpSocketProbeEnabled": { + "type": "boolean" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tolerations": { + "type": "array" + }, + "topologySpreadConstraints": { + "type": "array" + }, + "tsdb": { + "type": "object" + }, + "verticalAutoscaler": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + } + } + }, + "scrapeConfigFiles": { + "type": "array" + }, + "serverFiles": { + "type": "object", + "properties": { + "alerting_rules.yml": { + "type": "object" + }, + "alerts": { + "type": "object" + }, + "prometheus.yml": { + "type": "object", + "properties": { + "rule_files": { + "type": "array", + "items": { + "type": "string" + } + }, + "scrape_configs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "job_name": { + "type": "string" + }, + "static_configs": { + "type": "array", + "items": { + "type": "object", + 
"properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + } + } + } + }, + "recording_rules.yml": { + "type": "object" + }, + "rules": { + "type": "object" + } + } + }, + "serviceAccounts": { + "type": "object", + "properties": { + "server": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "automountServiceAccountToken": { + "type": "boolean" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/opencloud/charts/prometheus/values.yaml b/opencloud/charts/prometheus/values.yaml new file mode 100644 index 0000000..92eb43d --- /dev/null +++ b/opencloud/charts/prometheus/values.yaml @@ -0,0 +1,1380 @@ +# yaml-language-server: $schema=values.schema.json +# Default values for prometheus. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +rbac: + create: true + +imagePullSecrets: [] +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + server: + create: true + name: "" + annotations: {} + + ## Opt out of automounting Kubernetes API credentials. + ## It will be overriden by server.automountServiceAccountToken value, if set. + # automountServiceAccountToken: false + +## Additional labels to attach to all resources +commonMetaLabels: {} + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader +## +configmapReload: + ## URL for configmap-reload to use for reloads + ## + reloadUrl: "" + + ## env sets environment variables to pass to the container. Can be set as name/value pairs, + ## read from secrets or configmaps. + env: [] + # - name: SOMEVAR + # value: somevalue + # - name: PASSWORD + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: password + # optional: false + + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.86.2 + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). 
+ digest: "" + pullPolicy: IfNotPresent + + ## config-reloader's container port and port name for probes and metrics + containerPort: 8080 + containerPortName: metrics + + ## Additional configmap-reload container arguments + ## Set to null for argumentless flags + ## + extraArgs: {} + + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + ## Additional configmap-reload volume mounts + ## + extraVolumeMounts: [] + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + ## Security context to be added to configmap-reload container + containerSecurityContext: {} + + ## Settings for Prometheus reloader's readiness, liveness and startup probes + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + + livenessProbe: + httpGet: + path: /healthz + port: metrics + scheme: HTTP + periodSeconds: 10 + initialDelaySeconds: 2 + + readinessProbe: + httpGet: + path: /healthz + port: metrics + scheme: HTTP + periodSeconds: 10 + + startupProbe: + enabled: false + httpGet: + path: /healthz + port: metrics + scheme: HTTP + periodSeconds: 10 + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +server: + ## Prometheus server container name + ## + name: server + + ## Opt out of automounting Kubernetes API credentials. + ## If set it will override serviceAccounts.server.automountServiceAccountToken value for ServiceAccount. + # automountServiceAccountToken: false + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## If set it will override prometheus.server.fullname value for ClusterRole and ClusterRoleBinding + ## + clusterRoleNameOverride: "" + + # Enable only the release namespace for monitoring. By default all namespaces are monitored. + # If releaseNamespace and namespaces are both set a merged list will be monitored. + releaseNamespace: false + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + # OR for adding OAuth authentication to Prometheus + # sidecarContainers: + # oauth-proxy: + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.13.0 + # args: + # - --upstream=http://127.0.0.1:9090 + # - --http-address=0.0.0.0:8081 + # - ... 
+ # ports: + # - containerPort: 8081 + # name: oauth-proxy + # protocol: TCP + # resources: {} + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might look like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + # if not set appVersion field from Chart.yaml is used + tag: "" + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). + digest: "" + pullPolicy: IfNotPresent + + ## Prometheus server command + ## + command: [] + + ## prometheus server priorityClassName + ## + priorityClassName: "" + ## prometheus server runtimeClassName + ## + runtimeClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful when the '--web.external-url' includes a slug, + ## so that the various internal URLs remain accessible as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## May be the same as the Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + # List of flags to override default parameters, e.g: + # - --enable-feature=agent + # - --storage.agent.retention.max-time=30m + # - --config.file=/etc/config/prometheus.yml + defaultFlagsOverride: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default.
+ # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls DB locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by prometheus to set --storage.tsdb.path + ### When empty server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb + ## + tsdb: {} + # out_of_order_time_window: 0s + + ## https://prometheus.io/docs/guides/opentelemetry + ## + otlp: {} + # promote_resource_attributes: [] + # keep_identifying_resource_attributes: false + # translation_strategy: NoUTF8EscapingWithSuffixes + + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#exemplars + ## Must be enabled via --enable-feature=exemplar-storage + ## + exemplars: {} + # max_exemplars: 100000 + + ## Custom HTTP headers for Liveness/Readiness/Startup Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + # - name: "Authorization" + # value: "Bearer ABCDEabcde12345" + + ## Additional Prometheus server container arguments + ## Set to null for argumentless flags + ## + extraArgs: {} + # web.enable-remote-write-receiver: null + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## Prometheus server configuration from a secret + ## Do not set both `configMapOverrideName` and `configFromSecret` simultaneously. + ## Use either `configMapOverrideName` or `configFromSecret`. + ## If `configFromSecret` is defined, a ConfigMap resource will NOT be generated.
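+ ## e.g. (illustrative; assumes a Secret named "prometheus-config" holding a prometheus.yml key was created beforehand in the release namespace): + # configFromSecret: "prometheus-config"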
+ configFromSecret: "" + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## Extra labels for Prometheus server ConfigMap (ConfigMap that holds serverFiles) + extraConfigmapLabels: {} + + ## Override the prometheus.server.fullname for all objects related to the Prometheus server + fullnameOverride: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + ingressClassName: "" + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Redirect ingress to an additional defined port on the service + # servicePort: 8081 + + ## Prometheus server Ingress hostnames with optional path (passed through tpl) + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. (passed through tpl) + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration (hosts passed through tpl) + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## route (map) allows configuration of HTTPRoute resources + ## Requires Gateway API resources and suitable controller installed within the cluster + ## Ref. https://gateway-api.sigs.k8s.io/guides/http-routing/ + route: + main: + ## Enable this route + enabled: false + + ## ApiVersion set by default to "gateway.networking.k8s.io/v1" + apiVersion: "" + ## kind set by default to HTTPRoute + kind: "" + + ## Annotations to attach to the HTTPRoute resource + annotations: {} + ## Labels to attach to the HTTPRoute resource + labels: {} + + ## ParentRefs refers to resources this HTTPRoute is to be attached to (Gateways) + parentRefs: [] + # - name: contour + # sectionName: http + + ## Hostnames (templated) defines a set of hostnames that should match against the HTTP Host + ## header to select a HTTPRoute used to process the request + hostnames: [] + # - my.example.com + + ## additionalRules (templated) allows adding custom rules to the route + additionalRules: [] + + ## Filters define the filters that are applied to requests that match + ## this rule + filters: [] + + ## Matches define conditions used for matching the rule against incoming + ## HTTP requests + matches: + - path: + type: PathPrefix + value: / + + ## httpsRedirect adds a filter for redirecting to https (HTTP 301 Moved Permanently). + ## To redirect HTTP traffic to HTTPS, you need to have a Gateway with both HTTP and HTTPS listeners. + ## Matches and filters do not take effect if enabled. + ## Ref. 
https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/ + httpsRedirect: false + + ## Server Deployment Strategy type + strategy: + type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus server replicas on the same node. + ## The value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The default value "" will disable pod anti-affinity so that no anti-affinity rules will be configured (unless set in `server.affinity`). + ## + podAntiAffinity: "" + + ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Pod topology spread constraints + ## ref. https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + # maxUnavailable: 1 + # minAvailable: 1 + ## unhealthyPodEvictionPolicy is available since 1.27.0 (beta) + ## https://kubernetes.io/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy + # unhealthyPodEvictionPolicy: IfHealthyBudget + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## If set it will override the name of the created persistent volume claim + ## generated by the stateful set. 
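+ ## e.g. (illustrative override; any valid PVC name works): + # statefulSetNameOverride: "prometheus-data"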
+ ## + statefulSetNameOverride: "" + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume labels + ## + labels: {} + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + ## Persistent Volume Name + ## Useful if Persistent Volumes have been provisioned in advance and you want to use a specific one + ## + # volumeName: "" + + emptyDir: + ## Prometheus server emptyDir volume + ## Configure size limit and medium + ## + medium: "" + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Number of old history to retain to allow rollback + ## Default Kubernetes value is set to 10 + ## + revisionHistoryLimit: 10 + + ## Annotations to be added to ConfigMap + ## + configMapAnnotations: {} + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. + ## This allows scaling replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Prometheus server headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Statefulset's persistent volume claim retention policy + ## pvcDeleteOnStsDelete and pvcDeleteOnStsScale determine whether + ## statefulset's PVCs are deleted (true) or retained (false) on scaling down + ## and deleting statefulset, respectively. Requires 1.27.0+.
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + pvcDeleteOnStsDelete: false + pvcDeleteOnStsScale: false + + daemonSet: + ## If true, use a daemonset instead of a deployment for pod management. + ## This allows running the prometheus agent on every node in the cluster. + ## + enabled: false + annotations: {} + labels: {} + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + tcpSocketProbeEnabled: false + probeScheme: HTTP + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + startupProbe: + enabled: false + periodSeconds: 5 + failureThreshold: 30 + timeoutSeconds: 10 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because the AWS-managed control plane cannot communicate with the pods' IP CIDR, so admission webhooks would not work otherwise. + ## + hostNetwork: false + + # When hostNetwork is enabled, this will be set to ClusterFirstWithHostNet automatically + dnsPolicy: ClusterFirst + + # Use hostPort + # hostPort: 9090 + + # Use portName + portName: "" + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + ## Security context to be added to server container + ## + containerSecurityContext: {} + + service: + ## If false, no Service will be created for the Prometheus server + ## + enabled: true + + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + ## externalTrafficPolicy is applicable to service with externally-facing addresses (NodePorts, ExternalIPs, and LoadBalancer IPs) + externalTrafficPolicy: "" + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data.
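+ ## e.g. (illustrative) pin the Service to the first replica for a consistent view: + # statefulsetReplica: + # enabled: true + # replica: 0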
+ statefulsetReplica: + enabled: false + replica: 0 + + ## Additional port to define in the Service + additionalPorts: [] + # additionalPorts: + # - name: authenticated + # port: 8081 + # targetPort: 8081 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + + ## Prometheus' data retention size. Supported units: B, KB, MB, GB, TB, PB, EB. + ## + retentionSize: "" + +## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation) +ruleFiles: {} + +## Prometheus server ConfigMap entries for scrape_config_files +## (allows scrape configs defined in additional files) +## +scrapeConfigFiles: [] + +## Prometheus server ConfigMap entries +## +serverFiles: + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED and will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below.
Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + # insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + # insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below.
+ # + # insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using an older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Metric relabel configs to apply to samples before ingestion. + # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + # metric_relabel_configs: + # - action: labeldrop + # regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of + # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_<parameter>`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_<parameter>`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter.
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + honor_labels: true + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, + # except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + honor_labels: true + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Example Scrape config for pods which should be scraped slower. 
A useful example + # would be stackdriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config extraScrapeConfigs: "" + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts alertRelabelConfigs: {} + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources.
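+ ## (Rendered from templates/network-policy.yaml in this chart when enabled.)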
+ ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: "" + +# Extra manifests to deploy as an array +extraManifests: [] + # - | + # apiVersion: v1 + # kind: ConfigMap + # metadata: + # labels: + # name: prometheus-extra + # data: + # extra-data: "value" + +# Configuration of subcharts defined in Chart.yaml + +## alertmanager sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager +## +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + persistence: + ## If true, storage will create or use a Persistent Volume + ## If false, storage will use emptyDir + ## + enabled: true + + ## Custom annotations for the PVC created by the alertmanager StatefulSet. + ## Useful for configuring storage provider options such as disk type, KMS encryption keys, or custom volume name prefixes. + annotations: {} + + ## Custom labels for the PVC created by the alertmanager StatefulSet. + ## Useful for selecting, grouping, and organizing PVCs so that they can be queried or targeted in deployments, policies, etc. + labels: {} + + ## Persistent Volume Storage Class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 2Gi + + ## Configure emptyDir volume + ## + emptyDir: {} + + podSecurityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +kube-state-metrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## prometheus-node-exporter sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter +## +prometheus-node-exporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + rbac: + pspEnabled: false + + containerSecurityContext: + allowPrivilegeEscalation: false + +## prometheus-pushgateway sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway +## +prometheus-pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + # Optional service annotations + serviceAnnotations: + prometheus.io/probe: pushgateway diff --git a/opencloud/templates/oc-auth/deployment.yaml b/opencloud/templates/oc-auth/deployment.yaml index bb92260..1297a48 100644 --- a/opencloud/templates/oc-auth/deployment.yaml +++ b/opencloud/templates/oc-auth/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocAuth.enabled }} +{{- if .Values.ocAuth.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-auth name: {{ .Release.Name }}-oc-auth spec: - replicas: 1 + replicas: {{ .Values.ocAuth.replicas }} selector: matchLabels: app: oc-auth @@ -50,5 +50,11 @@ spec: memory: "{{ .Values.ocAuth.resources.limits.memory }}" requests: cpu: "{{ .Values.ocAuth.resources.requests.cpu }}" - memory: "{{ .Values.ocAuth.resources.requests.memory }}" + memory: "{{ .Values.ocAuth.resources.requests.memory }}" + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 {{- end }} \
No newline at end of file diff --git a/opencloud/templates/oc-auth/service.yaml b/opencloud/templates/oc-auth/service.yaml index e72f585..9fe7d60 100644 --- a/opencloud/templates/oc-auth/service.yaml +++ b/opencloud/templates/oc-auth/service.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocAuth.enabled }} +{{- if .Values.ocAuth.enabled }} apiVersion: v1 kind: Service metadata: @@ -14,4 +14,27 @@ spec: selector: app: oc-auth type: ClusterIP -{{- end }} \ No newline at end of file +{{- end }} + +{{- if and .Values.ocAuth.enabled .Values.ocAuth.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-auth +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-auth + minReplicas: {{ .Values.ocAuth.hpa.minReplicas }} + maxReplicas: {{ .Values.ocAuth.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocAuth.hpa.targetCPUUtilizationPercentage }} +{{- end }} diff --git a/opencloud/templates/oc-catalog/deployment.yaml b/opencloud/templates/oc-catalog/deployment.yaml index a0abc77..1615825 100644 --- a/opencloud/templates/oc-catalog/deployment.yaml +++ b/opencloud/templates/oc-catalog/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocCatalog.enabled }} +{{- if .Values.ocCatalog.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-catalog name: {{ .Release.Name }}-oc-catalog spec: - replicas: 1 + replicas: {{ .Values.ocCatalog.replicas }} selector: matchLabels: app: oc-catalog @@ -36,4 +36,4 @@ spec: requests: cpu: "{{ .Values.ocCatalog.resources.requests.cpu }}" memory: "{{ .Values.ocCatalog.resources.requests.memory }}" - {{- end }} \ No newline at end of file +{{- end }} \ No newline at end of file diff --git a/opencloud/templates/oc-catalog/service.yaml b/opencloud/templates/oc-catalog/service.yaml index a606563..3a8dc4d 100644 --- a/opencloud/templates/oc-catalog/service.yaml +++ b/opencloud/templates/oc-catalog/service.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocCatalog.enabled }} +{{- if .Values.ocCatalog.enabled }} apiVersion: v1 kind: Service metadata: @@ -14,4 +14,27 @@ spec: selector: app: oc-catalog type: ClusterIP -{{- end }} \ No newline at end of file +{{- end }} + +{{- if and .Values.ocCatalog.enabled .Values.ocCatalog.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-catalog +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-catalog + minReplicas: {{ .Values.ocCatalog.hpa.minReplicas }} + maxReplicas: {{ .Values.ocCatalog.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocCatalog.hpa.targetCPUUtilizationPercentage }} +{{- end }} diff --git a/opencloud/templates/oc-datacenter/deployment.yaml b/opencloud/templates/oc-datacenter/deployment.yaml index cdd496b..93134c0 100644 --- a/opencloud/templates/oc-datacenter/deployment.yaml +++ b/opencloud/templates/oc-datacenter/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocDatacenter.enabled }} +{{- if .Values.ocDatacenter.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-datacenter name: {{ .Release.Name }}-oc-datacenter spec: - replicas: 1 + replicas: {{ .Values.ocDatacenter.replicas }} selector: 
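+ # NOTE: when ocDatacenter.hpa.enabled is true, the HorizontalPodAutoscaler
+ # added in service.yaml manages scaling and overrides the replica count
+ # set above on each scale event.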
matchLabels: app: oc-datacenter @@ -29,6 +29,12 @@ spec: - name: http containerPort: 8080 protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 resources: limits: cpu: "{{ .Values.ocDatacenter.resources.limits.cpu }}" diff --git a/opencloud/templates/oc-datacenter/service.yaml b/opencloud/templates/oc-datacenter/service.yaml index 2815957..9b762ec 100644 --- a/opencloud/templates/oc-datacenter/service.yaml +++ b/opencloud/templates/oc-datacenter/service.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocDatacenter.enabled }} +{{- if .Values.ocDatacenter.enabled }} apiVersion: v1 kind: Service metadata: @@ -14,4 +14,28 @@ spec: selector: app: oc-datacenter type: ClusterIP -{{- end }} \ No newline at end of file +{{- end }} + + +{{- if and .Values.ocDatacenter.enabled .Values.ocDatacenter.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-datacenter +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-datacenter + minReplicas: {{ .Values.ocDatacenter.hpa.minReplicas }} + maxReplicas: {{ .Values.ocDatacenter.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocDatacenter.hpa.targetCPUUtilizationPercentage }} +{{- end }} diff --git a/opencloud/templates/oc-front/deployment.yaml b/opencloud/templates/oc-front/deployment.yaml index c0ce772..8f7615f 100644 --- a/opencloud/templates/oc-front/deployment.yaml +++ b/opencloud/templates/oc-front/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocFront.enabled }} +{{- if .Values.ocFront.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-front name: {{ .Release.Name }}-oc-front spec: - replicas: 1 + replicas: {{ .Values.ocFront.replicas }} selector: matchLabels: app: oc-front @@ -30,6 +30,12 @@ spec: - name: http containerPort: 80 protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 resources: limits: cpu: "{{ .Values.ocFront.resources.limits.cpu }}" diff --git a/opencloud/templates/oc-front/service.yaml b/opencloud/templates/oc-front/service.yaml index 3c849bd..a090def 100644 --- a/opencloud/templates/oc-front/service.yaml +++ b/opencloud/templates/oc-front/service.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocFront.enabled }} +{{- if .Values.ocFront.enabled }} apiVersion: v1 kind: Service metadata: @@ -14,4 +14,27 @@ spec: selector: app: oc-front type: ClusterIP -{{- end }} \ No newline at end of file +{{- end }} + +{{- if and .Values.ocFront.enabled .Values.ocFront.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-front +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-front + minReplicas: {{ .Values.ocFront.hpa.minReplicas }} + maxReplicas: {{ .Values.ocFront.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocFront.hpa.targetCPUUtilizationPercentage }} +{{- end }} diff --git a/opencloud/templates/oc-peer/deployment.yaml b/opencloud/templates/oc-peer/deployment.yaml index ea4390d..917fe6f 100644 --- a/opencloud/templates/oc-peer/deployment.yaml +++ b/opencloud/templates/oc-peer/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index 
.Values.ocPeer.enabled }} +{{- if .Values.ocPeer.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-peer name: {{ .Release.Name }}-oc-peer spec: - replicas: 1 + replicas: {{ .Values.ocPeer.replicas }} selector: matchLabels: app: oc-peer @@ -25,6 +25,12 @@ spec: envFrom: - configMapRef: name: opencloud-config + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 ports: - name: http containerPort: 8080 diff --git a/opencloud/templates/oc-peer/service.yaml b/opencloud/templates/oc-peer/service.yaml index a513df7..ef85005 100644 --- a/opencloud/templates/oc-peer/service.yaml +++ b/opencloud/templates/oc-peer/service.yaml @@ -14,4 +14,27 @@ spec: selector: app: oc-peer type: ClusterIP +{{- end }} + +{{- if and .Values.ocPeer.enabled .Values.ocPeer.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-peer +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-peer + minReplicas: {{ .Values.ocPeer.hpa.minReplicas }} + maxReplicas: {{ .Values.ocPeer.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocPeer.hpa.targetCPUUtilizationPercentage }} {{- end }} \ No newline at end of file diff --git a/opencloud/templates/oc-scheduler/deployment.yaml b/opencloud/templates/oc-scheduler/deployment.yaml index d11ee65..6eef542 100644 --- a/opencloud/templates/oc-scheduler/deployment.yaml +++ b/opencloud/templates/oc-scheduler/deployment.yaml @@ -6,7 +6,7 @@ metadata: app: oc-scheduler name: {{ .Release.Name }}-oc-scheduler spec: - replicas: 1 + replicas: {{ .Values.ocScheduler.replicas }} selector: matchLabels: app: oc-scheduler @@ -26,6 +26,12 @@ spec: envFrom: - configMapRef: name: opencloud-config + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 ports: - name: http containerPort: 8080 diff --git a/opencloud/templates/oc-scheduler/service.yaml b/opencloud/templates/oc-scheduler/service.yaml index a6e99df..b37909a 100644 --- a/opencloud/templates/oc-scheduler/service.yaml +++ b/opencloud/templates/oc-scheduler/service.yaml @@ -14,4 +14,27 @@ spec: selector: app: oc-scheduler type: ClusterIP +{{- end }} + +{{- if and .Values.ocScheduler.enabled .Values.ocScheduler.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-scheduler +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-scheduler + minReplicas: {{ .Values.ocScheduler.hpa.minReplicas }} + maxReplicas: {{ .Values.ocScheduler.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocScheduler.hpa.targetCPUUtilizationPercentage }} {{- end }} \ No newline at end of file diff --git a/opencloud/templates/oc-schedulerd/deployment.yaml b/opencloud/templates/oc-schedulerd/deployment.yaml index d91e93b..c3b2929 100644 --- a/opencloud/templates/oc-schedulerd/deployment.yaml +++ b/opencloud/templates/oc-schedulerd/deployment.yaml @@ -6,7 +6,7 @@ metadata: app: oc-schedulerd name: {{ .Release.Name }}-oc-schedulerd spec: - replicas: 1 + replicas: {{ .Values.ocSchedulerd.replicas }} selector: matchLabels: app: oc-schedulerd @@ -25,6 +25,12 @@ spec: envFrom: - configMapRef: name: 
opencloud-config + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 resources: limits: cpu: "{{ .Values.ocSchedulerd.resources.limits.cpu }}" diff --git a/opencloud/templates/oc-schedulerd/service.yaml b/opencloud/templates/oc-schedulerd/service.yaml new file mode 100644 index 0000000..2e8073b --- /dev/null +++ b/opencloud/templates/oc-schedulerd/service.yaml @@ -0,0 +1,40 @@ +{{- if .Values.ocSchedulerd.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: oc-schedulerd-svc + labels: + app: oc-schedulerd-svc +spec: + ports: + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app: oc-schedulerd + type: ClusterIP +{{- end }} + +{{- if and .Values.ocSchedulerd.enabled .Values.ocSchedulerd.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-schedulerd +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-schedulerd + minReplicas: {{ .Values.ocSchedulerd.hpa.minReplicas }} + maxReplicas: {{ .Values.ocSchedulerd.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocSchedulerd.hpa.targetCPUUtilizationPercentage }} +{{- end }} \ No newline at end of file diff --git a/opencloud/templates/oc-shared/deployment.yaml b/opencloud/templates/oc-shared/deployment.yaml index 1dfb868..a898031 100644 --- a/opencloud/templates/oc-shared/deployment.yaml +++ b/opencloud/templates/oc-shared/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocShared.enabled }} +{{- if .Values.ocShared.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-shared name: {{ .Release.Name }}-oc-shared spec: - replicas: 1 + replicas: {{ .Values.ocShared.replicas }} selector: matchLabels: app: oc-shared @@ -25,6 +25,12 @@ spec: envFrom: - configMapRef: name: opencloud-config + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 ports: - name: http containerPort: 8080 diff --git a/opencloud/templates/oc-shared/service.yaml b/opencloud/templates/oc-shared/service.yaml index 6a46dbb..1125733 100644 --- a/opencloud/templates/oc-shared/service.yaml +++ b/opencloud/templates/oc-shared/service.yaml @@ -14,4 +14,27 @@ spec: selector: app: oc-shared type: ClusterIP +{{- end }} + +{{- if and .Values.ocShared.enabled .Values.ocShared.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-shared +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-shared + minReplicas: {{ .Values.ocShared.hpa.minReplicas }} + maxReplicas: {{ .Values.ocShared.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocShared.hpa.targetCPUUtilizationPercentage }} {{- end }} \ No newline at end of file diff --git a/opencloud/templates/oc-workflow/deployment.yaml b/opencloud/templates/oc-workflow/deployment.yaml index 2783960..9fa1725 100644 --- a/opencloud/templates/oc-workflow/deployment.yaml +++ b/opencloud/templates/oc-workflow/deployment.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocWorkflow.enabled }} +{{- if .Values.ocWorkflow.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -6,7 +6,7 @@ metadata: app: oc-workflow name: {{ .Release.Name
}}-oc-workflow spec: - replicas: 1 + replicas: {{ .Values.ocWorkflow.replicas }} selector: matchLabels: app: oc-workflow @@ -29,6 +29,12 @@ spec: - name: http containerPort: 8080 protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 resources: limits: cpu: "{{ .Values.ocWorkflow.resources.limits.cpu }}" diff --git a/opencloud/templates/oc-workflow/service.yaml b/opencloud/templates/oc-workflow/service.yaml index 03cca9c..479f81b 100644 --- a/opencloud/templates/oc-workflow/service.yaml +++ b/opencloud/templates/oc-workflow/service.yaml @@ -1,4 +1,4 @@ -{{- if index .Values.ocWorkflow.enabled }} +{{- if .Values.ocWorkflow.enabled }} apiVersion: v1 kind: Service metadata: @@ -14,4 +14,27 @@ spec: selector: app: oc-workflow type: ClusterIP +{{- end }} + +{{- if and .Values.ocWorkflow.enabled .Values.ocWorkflow.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-workflow +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-workflow + minReplicas: {{ .Values.ocWorkflow.hpa.minReplicas }} + maxReplicas: {{ .Values.ocWorkflow.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocWorkflow.hpa.targetCPUUtilizationPercentage }} {{- end }} \ No newline at end of file diff --git a/opencloud/templates/oc-workspace/deployment.yaml b/opencloud/templates/oc-workspace/deployment.yaml index 4328240..527b2a8 100644 --- a/opencloud/templates/oc-workspace/deployment.yaml +++ b/opencloud/templates/oc-workspace/deployment.yaml @@ -6,7 +6,7 @@ metadata: app: oc-workspace name: {{ .Release.Name }}-oc-workspace spec: - replicas: 1 + replicas: {{ .Values.ocWorkspace.replicas }} selector: matchLabels: app: oc-workspace @@ -25,6 +25,12 @@ spec: - name: http containerPort: 8080 protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: http + initialDelaySeconds: 10 + periodSeconds: 30 resources: limits: cpu: "{{ .Values.ocWorkspace.resources.limits.cpu }}" diff --git a/opencloud/templates/oc-workspace/service.yaml b/opencloud/templates/oc-workspace/service.yaml index 668440d..09552d7 100644 --- a/opencloud/templates/oc-workspace/service.yaml +++ b/opencloud/templates/oc-workspace/service.yaml @@ -14,4 +14,27 @@ spec: selector: app: oc-workspace type: ClusterIP +{{- end }} + +{{- if and .Values.ocWorkspace.enabled .Values.ocWorkspace.hpa.enabled }} +--- +# Horizontal Pod Autoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ .Release.Name }}-oc-workspace +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ .Release.Name }}-oc-workspace + minReplicas: {{ .Values.ocWorkspace.hpa.minReplicas }} + maxReplicas: {{ .Values.ocWorkspace.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ocWorkspace.hpa.targetCPUUtilizationPercentage }} {{- end }} \ No newline at end of file diff --git a/opencloud/templates/openCloudConf.yaml b/opencloud/templates/openCloudConf.yaml index de4979b..ab0a484 100644 --- a/opencloud/templates/openCloudConf.yaml +++ b/opencloud/templates/openCloudConf.yaml @@ -22,7 +22,8 @@ data: OC_LDAP_BINDPW: "{{ index .Values.ocAuth.ldap.binPwd }}" OC_LDAP_BASEDN: "{{ index .Values.ocAuth.ldap.baseDn }}" OC_LDAP_ROLE_BASEDN: "{{ index .Values.ocAuth.ldap.roleBaseDn }}" -
OC_MONGO_URL: "mongodb://{{ index .Values.mongodb.auth.usernames 0 }}:{{ index .Values.mongodb.auth.passwords 0 }}@{{ .Release.Name }}-mongodb.{{ .Release.Namespace }}:27017/{{ index .Values.mongodb.auth.databases 0 }}" + OC_MONGO_URL: "mongodb://{{ index .Values.mongodb.auth.rootUser }}:{{ index .Values.mongodb.auth.rootPassword }}@{{ .Release.Name }}-mongodb.{{ .Release.Namespace }}:27017/{{ index .Values.mongodb.auth.databases 0 }}" OC_MONGO_DATABASE: "{{ index .Values.mongodb.auth.databases 0 }}" OC_NATS_URL: "nats://{{ .Release.Name }}-nats.{{ .Release.Namespace }}:4222" OC_LOKI_URL: "http://{{ .Release.Name }}-loki.{{ .Release.Namespace }}:3100" + OC_PROMETHEUS_URL: "http://{{ .Release.Name }}-monitor.{{ .Release.Namespace }}:9090" \ No newline at end of file diff --git a/opencloud/templates/openldap.yaml b/opencloud/templates/openldap.yaml new file mode 100644 index 0000000..9e30f77 --- /dev/null +++ b/opencloud/templates/openldap.yaml @@ -0,0 +1,33 @@ +{{- if .Values.externalLDAP.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: oc-openldap + name: {{ .Release.Name }}-oc-openldap +spec: + replicas: 1 + selector: + matchLabels: + app: oc-openldap + template: + metadata: + labels: + app: oc-openldap + spec: + containers: + - name: oc-openldap + image: {{ .Values.openldap.image.repository }} + env: + - name: LDAP_ORGANISATION + value: {{ .Values.openldap.env.LDAP_ORGANISATION }} + - name: LDAP_DOMAIN + value: {{ .Values.openldap.env.LDAP_DOMAIN }} + volumeMounts: + - name: ldif + mountPath: /container/service/slapd/assets/config/bootstrap/ldif/external + volumes: + - name: ldif + configMap: + name: openldap-ldif +{{- end }} \ No newline at end of file diff --git a/opencloud/templates/prometheus.yaml b/opencloud/templates/prometheus.yaml new file mode 100644 index 0000000..65c9aed --- /dev/null +++ b/opencloud/templates/prometheus.yaml @@ -0,0 +1,15 @@ +{{- if .Values.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ .Release.Name }}-monitor.{{ .Release.Namespace }} + labels: + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: monitor + endpoints: + - port: http + interval: 30s +{{- end }} \ No newline at end of file diff --git a/opencloud/templates/prometheus/deployment.yaml b/opencloud/templates/prometheus/deployment.yaml deleted file mode 100644 index bdee65b..0000000 --- a/opencloud/templates/prometheus/deployment.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "my-prometheus.fullname" . }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ include "my-prometheus.name" . }} - template: - metadata: - labels: - app: {{ include "my-prometheus.name" . }} - spec: - containers: - - name: prometheus - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - containerPort: 9090 - resources: - {{- toYaml .Values.resources | nindent 12 }} \ No newline at end of file diff --git a/opencloud/templates/prometheus/service.yaml b/opencloud/templates/prometheus/service.yaml deleted file mode 100644 index 616a6c4..0000000 --- a/opencloud/templates/prometheus/service.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "my-prometheus.fullname" . }} -spec: - type: {{ .Values.service.type }} - selector: - app: {{ include "my-prometheus.name" .
}} - ports: - - port: {{ .Values.service.port }} - targetPort: 9090 \ No newline at end of file diff --git a/opencloud/values.yaml.template b/opencloud/values.yaml.template new file mode 100644 index 0000000..4a47871 --- /dev/null +++ b/opencloud/values.yaml.template @@ -0,0 +1,588 @@ +env: {{ .Release.Name }} # For storage class provisioning +host: ${HOST:-exemple.com} # For reverse proxy rule +registryHost: ${REGISTRY_HOST:-registry.exemple.com} # For reverse proxy rule +scheme: https # For reverse proxy rule + +mongo-express: + enabled: ${OC_MONGOEXPRESS_ENABLED:-true} + mongodbServer: "{{ .Release.Name }}-mongodb.{{ .Release.Namespace }}" # TO LOOK AFTER + mongodbPort: 27017 + mongodbEnableAdmin: true + mongodbAdminUsername: ${OC_MONGO_ADMIN:-admin} + mongodbAdminPassword: ${OC_MONGO_PWD:-admin} + siteBaseUrl: /mongoexpress + basicAuthUsername: ${OC_MONGOEXPRESS_ADMIN:-admin} + basicAuthPassword: ${OC_MONGOEXPRESS_PWD:-admin} + mongodb: + enabled: false + +mongodb: + enabled: ${OC_MONGO_ENABLED:-true} + global: + defaultStorageClass: longhorn-nor1 + storageClass: longhorn-nor1 + architecture: standalone + useStatefulSet: false + auth: + enabled: true + rootUser: ${OC_MONGO_ADMIN:-admin} + rootPassword: ${OC_MONGO_PWD:-admin} + databases: [ ${OC_MONGO_DATABASE:-opencloud} ] + usernames: [] + passwords: [] + resourcesPreset: "small" + replicaCount: 1 + persistence: + enabled: true + storageClass: longhorn-nor1 + existingClaim: mongo-pvc + accessModes: + - ReadWriteOnce + size: ${OC_MONGO_SIZE:-5000Mi} + persistentVolumeClaimRetentionPolicy: + enabled: true + whenDeleted: Retain + whenScaled: Retain + arbiter: + enabled: false + livenessProbe: + enabled: true + readinessProbe: + enabled: true + +nats: + enabled: ${OC_NATS_ENABLED:-true} + jetstream: + enabled: true + fileStore: + size: ${OC_NATS_SIZE:-20Mi} + storageClassName: longhorn-nor1 + +openldap: + enabled: ${OC_LDAP_ENABLED:-true} + test: + enabled: false + ltb-passwd: + enabled: false + replicaCount: 1 + image: + repository: ${OC_LDAP_IMAGE:-osixia/openldap} + tls: + enabled: false + env: + LDAP_ORGANISATION: ${OC_LDAP_ORGANISATION:-Opencloud} + LDAP_DOMAIN: ${OC_LDAP_DOMAIN:-opencloud.com} + LDAP_BACKEND: "mdb" + LDAP_TLS: ${OC_LDAP_TLS:-false} + LDAP_TLS_ENFORCE: ${OC_LDAP_TLS:-false} + LDAP_REMOVE_CONFIG_AFTER_SETUP: "true" + adminPassword: ${OC_LDAP_ADMIN_PWD:-admin} + configPassword: "${OC_LDAP_CONFIG_PWD:-config}" + phpldapadmin: + enabled: false + persistence: + enabled: true + accessMode: ReadWriteOnce + size: ${OC_LDAP_SIZE:-10Mi} + storageClass: longhorn-nor1 + replication: + enabled: false + externalLDAP: + enabled: ${OC_LDAP_EXTERNAL:-false} + url: ${OC_LDAP_EXTERNAL_ENDPOINT} + bindDN: ${OC_LDAP_EXTERNAL_DN:-cn=admin,dc=example,dc=com} + bindPassword: ${OC_LDAP_EXTERNAL_PWD:-admin} + customLdifFiles: + 01-schema.ldif: |- + dn: ou=${OC_LDAP_GROUPS_OU:-groups},${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: organizationalUnit + ou: ${OC_LDAP_GROUPS_OU:-groups} + + dn: ou=${OC_LDAP_USERS_OU:-users},${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: organizationalUnit + ou: ${OC_LDAP_USERS_OU:-users} + + dn: cn=lastGID,${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: device + objectClass: top + description: Records the last GID used to create a Posix group. This prevents the re-use of a GID from a deleted group. 
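+ # (Counter entry: serialNumber stores the last GID handed out, so new
+ # groups can be numbered without reusing a deleted group's GID.)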
+ cn: lastGID + serialNumber: 2001 + + dn: cn=lastUID,${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: device + objectClass: top + serialNumber: 2001 + description: Records the last UID used to create a Posix account. This prevents the re-use of a UID from a deleted account. + cn: lastUID + + dn: cn=everybody,ou=${OC_LDAP_GROUPS_OU:-groups},${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: top + objectClass: posixGroup + cn: everybody + memberUid: admin + gidNumber: 2003 + + 02-ldapadmin.ldif : |- + dn: cn=${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin},ou=${OC_LDAP_GROUPS_OU:-groups},${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: top + objectClass: posixGroup + cn: ${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin} + memberUid: ${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin} + gidNumber: 2001 + + dn: uid=${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin},ou=${OC_LDAP_USERS_OU:-users},${OC_LDAP_MNGT_DN:-dc=example,dc=com} + givenName: ldap + sn: admin + uid: ${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin} + cn: ${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin} + mail: ldapadmin@example.com + objectClass: person + objectClass: inetOrgPerson + objectClass: posixAccount + userPassword: sai1yeiT + uidNumber: 2001 + gidNumber: 2001 + loginShell: /bin/bash + homeDirectory: /home/ldapadmin + + 03-opencloudadmin.ldif : |- + dn: uid=admin,ou=${OC_LDAP_USERS_OU:-users},${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: inetOrgPerson + cn: Admin + sn: Istrator + uid: ${OC_LDAP_ADMIN_USER:-admin} + userPassword: ${OC_LDAP_ADMIN_PWD:-admin} + mail: admin@example.com + ou: ${OC_LDAP_USERS_OU:-users} + + dn: ou=AppRoles,${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: organizationalunit + ou: AppRoles + description: AppRoles + + dn: ou=App1,ou=AppRoles,${OC_LDAP_MNGT_DN:-dc=example,dc=com} + objectClass: organizationalunit + ou: App1 + description: App1 + +prometheus: + enabled: ${OC_PROMETHEUS_ENABLED:-true} + server: + persistentVolume: + enabled: true + size: ${OC_PROMETHEUS_SIZE:-5Gi} + service: + type: ClusterIP + resources: + limits: + cpu: ${OC_PROMETHEUS_LIMITS_CPU:-500m} + memory: ${OC_PROMETHEUS_LIMITS_MEMORY:-512Mi} + requests: + cpu: ${OC_PROMETHEUS_REQUESTS_CPU:-128m} + memory: ${OC_PROMETHEUS_REQUESTS_MEMORY:-256Mi} + +# ldap user manager configuration +ldapUserManager: + enabled: true + env: + SERVER_HOSTNAME: ${OC_LDAP_MNGT_HOST:-ldap.exemple.com} + LDAP_BASE_DN: ${OC_LDAP_MNGT_DN:-dc=example,dc=com} + LDAP_REQUIRE_STARTTLS: ${OC_LDAP_MNGT_REQUIRE_TLS:-false} + LDAP_ADMINS_GROUP: ${OC_LDAP_MNGT_ADMIN_GROUP:-ldapadmin} + LDAP_ADMIN_BIND_DN: ${OC_LDAP_MNGT_ADMIN_DN:-cn=admin,dc=example,dc=com} + LDAP_ADMIN_BIND_PWD: ${OC_LDAP_MNGT_ADMIN_PWD:-admin} + LDAP_IGNORE_CERT_ERRORS: ${OC_LDAP_MNGT_IGNORE_CERTS_ERRORS:-true} + EMAIL_DOMAIN: ${OC_LDAP_MNGT_EMAIL_DOMAIN:- } + NO_HTTPS: ${OC_LDAP_MNGT_NO_HTTPS:-true} + SERVER_PATH: "/users" + ORGANISATION_NAME: ${OC_LDAP_ORGANISATION:-Opencloud} + LDAP_USER_OU: ${OC_LDAP_USERS_OU:-users} + LDAP_GROUP_OU: ${OC_LDAP_GROUPS_OU:-groups} + ACCEPT_WEAK_PASSWORDS: "true" + resources: + limits: + cpu: ${OC_FRONT_LIMITS_CPU:-128m} + memory: ${OC_FRONT_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_FRONT_REQUESTS_CPU:-128m} + memory: ${OC_FRONT_REQUESTS_MEMORY:-256Mi} + +traefik: + enabled: ${OC_TRAEFIK_ENABLED:-true} + service: + type: NodePort + ingressRoute: + dashboard: + enabled: true + matchRule: Host(`localhost`) && PathPrefix(`/api`) || PathPrefix(`/dashboard`) + entryPoints: [web] + ports: + web: + nodePort: 30950 + +hydra: + enabled: ${OC_HYDRA_ENABLED:-true} + maester: + enabled: true + secret: 
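+ # Chart-managed secret creation is disabled; the release relies on a
+ # pre-provisioned `hydra-secret` (see `existingSecret` below).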
+ enabled: false + nameOverride: hydra-secret + hashSumEnabled: false + hydra: + dev: true + existingSecret: hydra-secret + config: + dsn: memory + urls: + # login: https://localhost-login/authentication/login + # consent: https://localhost-consent/consent/consent + # logout: https://localhost-logout/authentication/logout + self: + issuer: "http://{{ .Release.Name }}-hydra-public.{{ .Release.Namespace }}:4444/" + +keto: + enabled: ${OC_KETO_ENABLED:-true} + keto: + config: + serve: + read: + port: 4466 + write: + port: 4467 + metrics: + port: 4468 + namespaces: + - id: 0 + name: open-cloud + dsn: memory + + +loki: + enabled: ${OC_LOKI_ENABLED:-true} + loki: + auth_enabled: false + commonConfig: + replication_factor: 1 + storage: + type: filesystem + filesystem: + chunks_directory: /var/loki/chunks + rules_directory: /var/loki/rules + admin_api_directory: /var/loki/admin + storage_config: + boltdb_shipper: + active_index_directory: /var/loki/index + filesystem: + directory: /var/loki/chunks + limits_config: + allow_structured_metadata: false + schemaConfig: + configs: + - from: "2020-01-01" + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + max_concurrent: 2 + + deploymentMode: SingleBinary + singleBinary: + extraVolumes: + - name: loki-storage + persistentVolumeClaim: + claimName: loki-pvc + persistence: + enabled: false # Deactivate loki auto provisioning, rely on existing PVC + accessMode: ReadWriteOnce + size: ${OC_LOKI_SIZE:-1Gi} + storageClassName: longhorn-nor1 + claimName: loki-pvc + + extraVolumeMounts: + - name: loki-storage + mountPath: /var/loki + replicas: 1 + resources: + limits: + cpu: ${OC_LOKI_LIMITS_CPU:-3} + memory: ${OC_LOKI_LIMITS_MEMORY:-4Gi} + requests: + cpu: ${OC_LOKI_REQUESTS_CPU:-1} + memory: ${OC_LOKI_REQUESTS_MEMORY:-0.5Gi} + extraEnv: + - name: GOMEMLIMIT + value: 3750MiB + + chunksCache: + # default is 500MB, with limited memory keep this smaller + writebackSizeLimit: 10MB + + # Enable minio for storage + minio: + enabled: ${OC_MINIO_ENABLED:-false} + # Zero out replica counts of other deployment modes + backend: + replicas: 0 + read: + replicas: 0 + write: + replicas: 0 + ingester: + replicas: 0 + querier: + replicas: 0 + queryFrontend: + replicas: 0 + queryScheduler: + replicas: 0 + distributor: + replicas: 0 + compactor: + replicas: 0 + indexGateway: + replicas: 0 + bloomCompactor: + replicas: 0 + bloomGateway: + replicas: 0 + +grafana: + enabled: ${OC_GRAFANA_ENABLED:-true} + adminUser: ${OC_GRAFANA_ADMIN_USER:-admin} + adminPassword: ${OC_GRAFANA_ADMIN_PWD:-admin} + persistence: + enabled: true + size: ${OC_GRAFANA_SIZE:-1Gi} + service: + type: ClusterIP + +argo-workflows: + enabled: ${OC_ARGO_ENABLED:-false} + workflow: + serviceAccount: + create: false + name: argo-workflow + rbac: + create: false # Manual provisioning + controller: + workflowNamespaces: [] #All of them + controller: + workflowDefaults: + spec: + serviceAccountName: argo-workflow + +ocAuth: + enabled: ${OC_AUTH_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_AUTH_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-auth:0.0.1} + authType: hydra + keto: + adminRole: admin + hydra: + openCloudOauth2ClientSecretName: oc-oauth2-client-secret + ldap: + bindDn: ${OC_LDAP_MNGT_ADMIN_DN:-cn=admin,dc=example,dc=com} + binPwd: ${OC_LDAP_ADMIN_PWD:-admin} + baseDn: ${OC_LDAP_MNGT_DN:-dc=example,dc=com} + roleBaseDn: 
${OC_LDAP_ROLE_DN:-ou=AppRoles,dc=example,dc=com} + resources: + limits: + cpu: ${OC_AUTH_LIMITS_CPU:-128m} + memory: ${OC_AUTH_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_AUTH_REQUESTS_CPU:-128m} + memory: ${OC_AUTH_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_AUTH_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_AUTH_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_AUTH_REPLICAS_USAGE:-80} + +ocFront: + enabled: ${OC_FRONT_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_FRONT_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-front:0.0.1} + resources: + limits: + cpu: ${OC_FRONT_LIMITS_CPU:-128m} + memory: ${OC_FRONT_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_FRONT_REQUESTS_CPU:-128m} + memory: ${OC_FRONT_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_FRONT_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_FRONT_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_FRONT_REPLICAS_USAGE:-80} + +ocWorkspace: + enabled: ${OC_WORKSPACE_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_WORKSPACE_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-workspace:0.0.1} + resources: + limits: + cpu: ${OC_WORKSPACE_LIMITS_CPU:-128m} + memory: ${OC_WORKSPACE_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_WORKSPACE_REQUESTS_CPU:-128m} + memory: ${OC_WORKSPACE_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_WORKSPACE_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_WORKSPACE_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_WORKSPACE_REPLICAS_USAGE:-80} + + +ocShared: + enabled: ${OC_SHARED_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_SHARED_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-shared:0.0.1} + resources: + limits: + cpu: ${OC_SHARED_LIMITS_CPU:-128m} + memory: ${OC_SHARED_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_SHARED_REQUESTS_CPU:-128m} + memory: ${OC_SHARED_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_SHARED_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_SHARED_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_SHARED_REPLICAS_USAGE:-80} + +ocWorkflow: + enabled: ${OC_WORKFLOW_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_WORKFLOW_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-workflow:0.0.1} + resources: + limits: + cpu: ${OC_WORKFLOW_LIMITS_CPU:-128m} + memory: ${OC_WORKFLOW_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_WORKFLOW_REQUESTS_CPU:-128m} + memory: ${OC_WORKFLOW_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_WORKFLOW_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_WORKFLOW_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_WORKFLOW_REPLICAS_USAGE:-80} + +ocCatalog: + enabled: ${OC_CATALOG_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_CATALOG_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-catalog:0.0.1} + resources: + limits: + cpu: ${OC_CATALOG_LIMITS_CPU:-128m} + memory: ${OC_CATALOG_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_CATALOG_REQUESTS_CPU:-128m} + memory: ${OC_CATALOG_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_CATALOG_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_CATALOG_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_CATALOG_REPLICAS_USAGE:-80} + +ocPeer: + enabled: ${OC_PEER_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_PEER_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-peer:0.0.1} + resources: + limits: + cpu:
${OC_PEER_LIMITS_CPU:-128m} + memory: ${OC_PEER_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_PEER_REQUESTS_CPU:-128m} + memory: ${OC_PEER_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_PEER_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_PEER_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_PEER_REPLICAS_USAGE:-80} + +ocDatacenter: + enabled: ${OC_DATACENTER_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_DATACENTER_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-datacenter:0.0.1} + resources: + limits: + cpu: ${OC_DATACENTER_LIMITS_CPU:-128m} + memory: ${OC_DATACENTER_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_DATACENTER_REQUESTS_CPU:-128m} + memory: ${OC_DATACENTER_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_DATACENTER_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_DATACENTER_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_DATACENTER_REPLICAS_USAGE:-80} + +ocSchedulerd: + enabled: ${OC_SCHEDULERD_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_SCHEDULERD_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-schedulerd:0.0.1} + resources: + limits: + cpu: ${OC_SCHEDULERD_LIMITS_CPU:-128m} + memory: ${OC_SCHEDULERD_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_SCHEDULERD_REQUESTS_CPU:-128m} + memory: ${OC_SCHEDULERD_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_SCHEDULERD_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_SCHEDULERD_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_SCHEDULERD_REPLICAS_USAGE:-80} + +ocScheduler: + enabled: ${OC_SCHEDULER_ENABLED:-true} + enableTraefikProxyIntegration: true + image: ${OC_SCHEDULER_IMAGE:-registry-opencloud.pf.irt-saintexupery.com/oc-scheduler:0.0.1} + resources: + limits: + cpu: ${OC_SCHEDULER_LIMITS_CPU:-128m} + memory: ${OC_SCHEDULER_LIMITS_MEMORY:-256Mi} + requests: + cpu: ${OC_SCHEDULER_REQUESTS_CPU:-128m} + memory: ${OC_SCHEDULER_REQUESTS_MEMORY:-256Mi} + replicas: 1 + hpa: + enabled: ${OC_SCHEDULER_REPLICAS_ENABLED:-true} + minReplicas: 1 + maxReplicas: ${OC_SCHEDULER_REPLICAS_MAX:-5} + targetCPUUtilizationPercentage: ${OC_SCHEDULER_REPLICAS_USAGE:-80} + +docker-registry-ui: + enabled: true + ui: + title: "opencloud docker registry" + proxy: true + dockerRegistryUrl: "http://{{ .Release.Name }}-docker-registry-ui-registry-server.{{ .Release.Namespace }}.svc.cluster.local:5000" + registry: + secretName: regcred + enabled: true + dataVolume: + persistentVolumeClaim: + claimName: docker-registry-pvc + persistence: + accessMode: ReadWriteOnce + storage: 5000Mi + storageClassName: longhorn-nor1 diff --git a/opencloud/dev-values.yaml b/opencloud/values/dev-values.yaml similarity index 98% rename from opencloud/dev-values.yaml rename to opencloud/values/dev-values.yaml index 0dbea7c..7abf096 100644 --- a/opencloud/dev-values.yaml +++ b/opencloud/values/dev-values.yaml @@ -1,10 +1,10 @@ -env: dev # For storage class provisioning +env: {{ .Release.Name }} # For storage class provisioning host: beta.opencloud.com # For reverse proxy rule scheme: http # For reverse proxy rule mongo-express: enabled: true - mongodbServer: dev-mongodb.dev + mongodbServer: "{{ .Release.Name }}-mongodb.{{ .Release.Namespace }}" mongodbPort: 27017 mongodbEnableAdmin: true mongodbAdminUsername: root @@ -240,7 +240,7 @@ hydra: #consent: https://localhost-consent/consent/consent #logout: https://localhost-logout/authentication/logout self: - issuer: http://dev-hydra-public:4444/ + issuer: "http://{{ .Release.Name
}}-hydra-public:4444/" keto: enabled: true diff --git a/opencloud/prod-values.yaml b/opencloud/values/prod-values.yaml similarity index 100% rename from opencloud/prod-values.yaml rename to opencloud/values/prod-values.yaml diff --git a/opencloud/values/test-values.yaml b/opencloud/values/test-values.yaml new file mode 100644 index 0000000..edf6bec --- /dev/null +++ b/opencloud/values/test-values.yaml @@ -0,0 +1,588 @@ +env: {{ .Release.Name }} # For storage class provisioning +host: exemple.com # For reverse proxy rule +registryHost: registry.exemple.com # For reverse proxy rule +scheme: https # For reverse proxy rule + +mongo-express: + enabled: true + mongodbServer: "{{ .Release.Name }}-mongodb.{{ .Release.Namespace }}" # TO LOOK AFTER + mongodbPort: 27017 + mongodbEnableAdmin: true + mongodbAdminUsername: admin + mongodbAdminPassword: admin + siteBaseUrl: /mongoexpress + basicAuthUsername: admin + basicAuthPassword: admin + mongodb: + enabled: false + +mongodb: + enabled: true + global: + defaultStorageClass: longhorn-nor1 + storageClass: longhorn-nor1 + architecture: standalone + useStatefulSet: false + auth: + enabled: true + rootUser: admin + rootPassword: admin + databases: [ opencloud ] + usernames: [] + passwords: [] + resourcesPreset: "small" + replicaCount: 1 + persistence: + enabled: true + storageClass: longhorn-nor1 + existingClaim: mongo-pvc + accessModes: + - ReadWriteOnce + size: 5000Mi + persistentVolumeClaimRetentionPolicy: + enabled: true + whenDeleted: Retain + whenScaled: Retain + arbiter: + enabled: false + livenessProbe: + enabled: true + readinessProbe: + enabled: true + +nats: + enabled: true + jetstream: + enabled: true + fileStore: + size: 20Mi + storageClassName: longhorn-nor1 + +openldap: + enabled: true + test: + enabled: false + ltb-passwd: + enabled: false + replicaCount: 1 + image: + repository: osixia/openldap + tls: + enabled: false + env: + LDAP_ORGANISATION: Opencloud + LDAP_DOMAIN: opencloud.com + LDAP_BACKEND: "mdb" + LDAP_TLS: false + LDAP_TLS_ENFORCE: false + LDAP_REMOVE_CONFIG_AFTER_SETUP: "true" + adminPassword: admin + configPassword: "config" + phpldapadmin: + enabled: false + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 10Mi + storageClass: longhorn-nor1 + replication: + enabled: false + externalLDAP: + enabled: false + url: ${OC_LDAP_EXTERNAL_ENDPOINT} + bindDN: cn=admin,dc=example,dc=com + bindPassword: admin + customLdifFiles: + 01-schema.ldif: |- + dn: ou=groups,dc=example,dc=com + objectClass: organizationalUnit + ou: groups + + dn: ou=users,dc=example,dc=com + objectClass: organizationalUnit + ou: users + + dn: cn=lastGID,dc=example,dc=com + objectClass: device + objectClass: top + description: Records the last GID used to create a Posix group. This prevents the re-use of a GID from a deleted group. + cn: lastGID + serialNumber: 2001 + + dn: cn=lastUID,dc=example,dc=com + objectClass: device + objectClass: top + serialNumber: 2001 + description: Records the last UID used to create a Posix account. This prevents the re-use of a UID from a deleted account. 
+ cn: lastUID + + dn: cn=everybody,ou=groups,dc=example,dc=com + objectClass: top + objectClass: posixGroup + cn: everybody + memberUid: admin + gidNumber: 2003 + + 02-ldapadmin.ldif : |- + dn: cn=ldapadmin,ou=groups,dc=example,dc=com + objectClass: top + objectClass: posixGroup + cn: ldapadmin + memberUid: ldapadmin + gidNumber: 2001 + + dn: uid=ldapadmin,ou=users,dc=example,dc=com + givenName: ldap + sn: admin + uid: ldapadmin + cn: ldapadmin + mail: ldapadmin@example.com + objectClass: person + objectClass: inetOrgPerson + objectClass: posixAccount + userPassword: sai1yeiT + uidNumber: 2001 + gidNumber: 2001 + loginShell: /bin/bash + homeDirectory: /home/ldapadmin + + 03-opencloudadmin.ldif : |- + dn: uid=admin,ou=users,dc=example,dc=com + objectClass: inetOrgPerson + cn: Admin + sn: Istrator + uid: admin + userPassword: admin + mail: admin@example.com + ou: Users + + dn: ou=AppRoles,dc=example,dc=com + objectClass: organizationalunit + ou: AppRoles + description: AppRoles + + dn: ou=App1,ou=AppRoles,dc=example,dc=com + objectClass: organizationalunit + ou: App1 + description: App1 + +prometheus: + enabled: true + server: + persistentVolume: + enabled: true + size: 5Gi + service: + type: ClusterIP + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 128m + memory: 256Mi + +# ldap user manager configuration +ldapUserManager: + enabled: true + env: + SERVER_HOSTNAME: ldap.exemple.com + LDAP_BASE_DN: dc=example,dc=com + LDAP_REQUIRE_STARTTLS: false + LDAP_ADMINS_GROUP: ldapadmin + LDAP_ADMIN_BIND_DN: cn=admin,dc=example,dc=com + LDAP_ADMIN_BIND_PWD: admin + LDAP_IGNORE_CERT_ERRORS: true + EMAIL_DOMAIN: + NO_HTTPS: true + SERVER_PATH: "/users" + ORGANISATION_NAME: Opencloud + LDAP_USER_OU: users + LDAP_GROUP_OU: groups + ACCEPT_WEAK_PASSWORDS: "true" + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + +traefik: + enabled: true + service: + type: NodePort + ingressRoute: + dashboard: + enabled: true + matchRule: Host(`localhost`) && PathPrefix(`/api`) || PathPrefix(`/dashboard`) + entryPoints: [web] + ports: + web: + nodePort: 30950 + +hydra: + enabled: true + maester: + enabled: true + secret: + enabled: false + nameOverride: hydra-secret + hashSumEnabled: false + hydra: + dev: true + existingSecret: hydra-secret + config: + dsn: memory + urls: + # login: https://localhost-login/authentication/login + # consent: https://localhost-consent/consent/consent + # logout: https://localhost-logout/authentication/logout + self: + issuer: "http://{{ .Release.Name }}-hydra-public.{{ .Release.Namespace }}:4444/" + +keto: + enabled: true + keto: + config: + serve: + read: + port: 4466 + write: + port: 4467 + metrics: + port: 4468 + namespaces: + - id: 0 + name: open-cloud + dsn: memory + + +loki: + enabled: true + loki: + auth_enabled: false + commonConfig: + replication_factor: 1 + storage: + type: filesystem + filesystem: + chunks_directory: /var/loki/chunks + rules_directory: /var/loki/rules + admin_api_directory: /var/loki/admin + storage_config: + boltdb_shipper: + active_index_directory: /var/loki/index + filesystem: + directory: /var/loki/chunks + limits_config: + allow_structured_metadata: false + schemaConfig: + configs: + - from: "2020-01-01" + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + max_concurrent: 2 + + deploymentMode: SingleBinary + singleBinary: + extraVolumes: + - name: loki-storage + 
persistentVolumeClaim: + claimName: loki-pvc + persistence: + enabled: false # Deactivate loki auto provisioning, rely on existing PVC + accessMode: ReadWriteOnce + size: 1Gi + storageClassName: longhorn-nor1 + claimName: loki-pvc + + extraVolumeMounts: + - name: loki-storage + mountPath: /var/loki + replicas: 1 + resources: + limits: + cpu: 3 + memory: 4Gi + requests: + cpu: 1 + memory: 0.5Gi + extraEnv: + - name: GOMEMLIMIT + value: 3750MiB + + chunksCache: + # default is 500MB, with limited memory keep this smaller + writebackSizeLimit: 10MB + + # Enable minio for storage + minio: + enabled: false + # Zero out replica counts of other deployment modes + backend: + replicas: 0 + read: + replicas: 0 + write: + replicas: 0 + ingester: + replicas: 0 + querier: + replicas: 0 + queryFrontend: + replicas: 0 + queryScheduler: + replicas: 0 + distributor: + replicas: 0 + compactor: + replicas: 0 + indexGateway: + replicas: 0 + bloomCompactor: + replicas: 0 + bloomGateway: + replicas: 0 + +grafana: + enabled: true + adminUser: admin + adminPassword: admin + persistence: + enabled: true + size: 1Gi + service: + type: ClusterIP + +argo-workflows: + enabled: false + workflow: + serviceAccount: + create: false + name: argo-workflow + rbac: + create: false # Manual provisioning + controller: + workflowNamespaces: [] #All of them + controller: + workflowDefaults: + spec: + serviceAccountName: argo-workflow + +ocAuth: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-auth:0.0.1 + authType: hydra + keto: + adminRole: admin + hydra: + openCloudOauth2ClientSecretName: oc-oauth2-client-secret + ldap: + bindDn: cn=admin,dc=example,dc=com + binPwd: admin + baseDn: dc=example,dc=com + roleBaseDn: ou=AppRoles,dc=example,dc=com + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocFront: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-front:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocWorkspace: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-workspace:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + + +ocShared: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-shared:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocWorkflow: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-workflow:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocCatalog: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-catalog:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + 
memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocPeer: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-peer:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocDatacenter: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-datacenter:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocSchedulerd: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-schedulerd:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +ocScheduler: + enabled: true + enableTraefikProxyIntegration: true + image: registry-opencloud.pf.irt-saintexupery.com/oc-scheduler:0.0.1 + resources: + limits: + cpu: 128m + memory: 256Mi + requests: + cpu: 128m + memory: 256Mi + replicas: 1 + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +docker-registry-ui: + enabled: true + ui: + title: "opencloud docker registry" + proxy: true + dockerRegistryUrl: "http://{{ .Release.Name }}-docker-registry-ui-registry-server.{{ .Release.Namespace }}.svc.cluster.local:5000" + registry: + secretName: regcred + enabled: true + dataVolume: + persistentVolumeClaim: + claimName: docker-registry-pvc + persistence: + accessMode: ReadWriteOnce + storage: 5000Mi + storageClassName: longhorn-nor1 diff --git a/uninstall.sh b/uninstall.sh deleted file mode 100755 index a4ff11b..0000000 --- a/uninstall.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -RELEASE_NAME=${1:-dev} -RELEASE_NAMESPACE=${1:-dev} - -helm uninstall ${RELEASE_NAME} -n ${RELEASE_NAMESPACE} - -export KUBECONFIG=$(realpath ~/.kube/config) diff --git a/upgrade.sh b/upgrade.sh deleted file mode 100755 index d57e64c..0000000 --- a/upgrade.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -RELEASE_NAME=${1:-dev} -RELEASE_NAMESPACE=${1:-dev} - -helm upgrade ${RELEASE_NAME} opencloud -n ${RELEASE_NAMESPACE} --create-namespace -f opencloud/${RELEASE_NAME}-values.yaml \ No newline at end of file
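For reference, the ${VAR:-default} placeholders in values.yaml.template follow POSIX shell parameter expansion, which plain envsubst does not implement; a shell here-document does. A minimal rendering sketch (the exported variable values are examples only, and the oc-k8s.sh script added by this patch is presumably the supported entry point):

    #!/bin/bash
    # Render opencloud/values.yaml.template into a concrete values file,
    # falling back to the template defaults for any unset OC_* variable,
    # then deploy the chart with it.
    export HOST=opencloud.example.com OC_MONGO_ADMIN=admin OC_MONGO_PWD=admin
    eval "cat <<__VALUES__
    $(cat opencloud/values.yaml.template)
    __VALUES__" > opencloud/values/generated-values.yaml
    # Caveat: here-document expansion also performs command substitution, so
    # the backticks in traefik.ingressRoute.dashboard.matchRule must be
    # escaped by a robust generator.
    helm upgrade --install dev opencloud -n dev --create-namespace \
      -f opencloud/values/generated-values.yaml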